//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
evaluated"); 89 STATISTIC(NumNestRemoved , "Number of nest attributes removed"); 90 STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); 91 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); 92 STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed"); 93 STATISTIC(NumInternalFunc, "Number of internal functions"); 94 STATISTIC(NumColdCC, "Number of functions marked coldcc"); 95 96 static cl::opt<bool> 97 EnableColdCCStressTest("enable-coldcc-stress-test", 98 cl::desc("Enable stress test of coldcc by adding " 99 "calling conv to all internal functions."), 100 cl::init(false), cl::Hidden); 101 102 static cl::opt<int> ColdCCRelFreq( 103 "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore, 104 cl::desc( 105 "Maximum block frequency, expressed as a percentage of caller's " 106 "entry frequency, for a call site to be considered cold for enabling" 107 "coldcc")); 108 109 /// Is this global variable possibly used by a leak checker as a root? If so, 110 /// we might not really want to eliminate the stores to it. 111 static bool isLeakCheckerRoot(GlobalVariable *GV) { 112 // A global variable is a root if it is a pointer, or could plausibly contain 113 // a pointer. There are two challenges; one is that we could have a struct 114 // the has an inner member which is a pointer. We recurse through the type to 115 // detect these (up to a point). The other is that we may actually be a union 116 // of a pointer and another type, and so our LLVM type is an integer which 117 // gets converted into a pointer, or our type is an [i8 x #] with a pointer 118 // potentially contained here. 119 120 if (GV->hasPrivateLinkage()) 121 return false; 122 123 SmallVector<Type *, 4> Types; 124 Types.push_back(GV->getValueType()); 125 126 unsigned Limit = 20; 127 do { 128 Type *Ty = Types.pop_back_val(); 129 switch (Ty->getTypeID()) { 130 default: break; 131 case Type::PointerTyID: return true; 132 case Type::ArrayTyID: 133 case Type::VectorTyID: { 134 SequentialType *STy = cast<SequentialType>(Ty); 135 Types.push_back(STy->getElementType()); 136 break; 137 } 138 case Type::StructTyID: { 139 StructType *STy = cast<StructType>(Ty); 140 if (STy->isOpaque()) return true; 141 for (StructType::element_iterator I = STy->element_begin(), 142 E = STy->element_end(); I != E; ++I) { 143 Type *InnerTy = *I; 144 if (isa<PointerType>(InnerTy)) return true; 145 if (isa<CompositeType>(InnerTy)) 146 Types.push_back(InnerTy); 147 } 148 break; 149 } 150 } 151 if (--Limit == 0) return true; 152 } while (!Types.empty()); 153 return false; 154 } 155 156 /// Given a value that is stored to a global but never read, determine whether 157 /// it's safe to remove the store and the chain of computation that feeds the 158 /// store. 159 static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { 160 do { 161 if (isa<Constant>(V)) 162 return true; 163 if (!V->hasOneUse()) 164 return false; 165 if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || 166 isa<GlobalValue>(V)) 167 return false; 168 if (isAllocationFn(V, TLI)) 169 return true; 170 171 Instruction *I = cast<Instruction>(V); 172 if (I->mayHaveSideEffects()) 173 return false; 174 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 175 if (!GEP->hasAllConstantIndices()) 176 return false; 177 } else if (I->getNumOperands() != 1) { 178 return false; 179 } 180 181 V = I->getOperand(0); 182 } while (true); 183 } 184 185 /// This GV is a pointer root. 
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist
  // the memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut
  // down before other threads that are still expecting to use those globals.
  // To handle that case, we expect the program may create a singleton and
  // never destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MTI->getSource())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
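///
/// For example (illustrative IR): once @g, whose initializer is i32 42, has
/// been marked constant, a use such as
///     %v = load i32, i32* @g
/// is replaced by i32 42 directly, and any remaining store to @g (necessarily
/// unreachable, or storing the initializer value back) is simply deleted.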
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) {
      // memset/memcpy/memmove
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
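/// For example, loads of the element and stores *to* (not *of*) the element
/// are safe, as are GEPs of the form 'getelementptr %elt, 0, ...' whose own
/// users are all safe; anything that lets the element's address escape is not.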
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (!GEPI) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (User *U : GEPI->users())
    if (!isSafeSROAElementUse(U))
      return false;
  return true;
}

/// U is a direct user of the specified global value.  Look at it and its uses
/// and decide whether it is safe to SROA this global.
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (GEPI.isSequential()) {
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that the index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (GEPI.isBoundedSequential() &&
        Idx->getZExtValue() >= GEPI.getSequentialNumElements())
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip the array index.
         GEPI != E;
         ++GEPI) {
      if (GEPI.isStruct())
        continue;

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal ||
          (GEPI.isBoundedSequential() &&
           IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
        return false;
    }
  }

  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users())
    if (!IsUserOfGlobalSafeForSRA(U, GV))
      return false;

  return true;
}

/// Copy over the debug info for a variable to its SRA replacements.
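/// Each replacement global describes only part of the original variable, so
/// (when the aggregate has more than one element) the debug expression is
/// wrapped in a DW_OP_LLVM_fragment covering the bit range
/// [FragmentOffsetInBits, FragmentOffsetInBits + FragmentSizeInBits).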
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 unsigned NumElements) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    if (NumElements > 1) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
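///
/// For example (illustrative IR):
///     @g = internal global { i32, i32 } { i32 1, i32 2 }
/// is split into
///     @g.0 = internal global i32 1
///     @g.1 = internal global i32 2
/// and each use 'getelementptr @g, 0, N, ...' is rewritten against @g.N.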
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable *> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    NewGlobals.reserve(NumElements);
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);

      // Copy over the debug info for the variable.
      uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
      uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(i);
      transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, NumElements);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);
    auto ElTy = STy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(ElTy);
    unsigned EltAlign = DL.getABITypeAlignment(ElTy);
    uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
      transferSRADebugInfo(GV, NGV, FragmentSizeInBits * i, FragmentSizeInBits,
                           NumElements);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt =
      Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the 1st operand, which has to be zero or else the program is
    // quite broken (undefined).  Get the 2nd operand, which is the structure
    // or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array, deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
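///
/// For example, '%x = load i32, i32* %p' and 'call void %p()' trap when %p is
/// null, so they qualify; 'store i8* %p, i8** @other' does not (it only
/// escapes the pointer), and 'icmp eq i8* %p, null' is tolerated as a special
/// case even though it never traps.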
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users())
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }

  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be
        // careful that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate
          // UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                     ConstantExpr::getCast(CI->getOpcode(),
                                           NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that those uses cannot be reachable with a
/// null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads from the global with uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite any loads of GV as uses of the new global.
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we
  // have a malloc + bitcast) then replace them with uses of the new global.
  // Update other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
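  // For example (illustrative IR): 'icmp eq i8* %ld, null', where %ld was
  // loaded from the global, becomes a load of the new i1 flag (negated where
  // the predicate requires it), since the pointer is non-null exactly when
  // the initializing store has executed.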
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now that GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on
  // it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue;  // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that this value passes the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
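///
/// For example, with %ld loaded from the global, uses such as
///     icmp eq %struct.T* %ld, null
///     getelementptr %struct.T, %struct.T* %ld, i64 %i, i32 2
/// are simple enough (the struct type name is hypothetical); any other kind
/// of use disqualifies the load.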
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't
  // know that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
        PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                        PN->getNumIncomingValues(),
                        PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                       std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
             DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
             std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at
  // the end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  // As we process loads, if we can't immediately update all uses of the load,
  // keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are
  // now loads, and all uses of those loads are simple.  Rewrite them to use
  // loads of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&
           "Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// This function is called when we see a pointer global variable with a single
/// value stored into it, where that value is a malloc or a cast of a malloc.
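///
/// For example (illustrative IR): if the only store to @g is
///     store i8* %m, i8** @g             ; %m = call i8* @malloc(i64 %n)
/// and every value loaded from @g is used in ways that would trap on null,
/// the malloc can be replaced with a global (or per-field globals), removing
/// the dynamic allocation entirely.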
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine the malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
      OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field. This is basically
  // SRoA for malloc'd memory.

  if (Ordering != AtomicOrdering::NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze it as a
  // variable size array. malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs. malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL.getIntPtrType(CI->getType());
      unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      Instruction *Malloc =
          CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
                                 OpBundles, nullptr, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
                         TLI);
    return true;
  }

  return false;
}

// Try to optimize globals based on the knowledge that only one value (besides
// its initializer) is ever stored to the global.
static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     const DataLayout &DL,
                                     TargetLibraryInfo *TLI) {
  // Ignore no-op GEPs and bitcasts.
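  // E.g., assuming a hypothetical global @X, a store of
  //    i8* bitcast (i32* @X to i8*)
  // is treated here the same as a store of @X itself.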
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
      Type *MallocType = getMallocAllocatedType(CI, TLI);
      if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
                                                           Ordering, DL, TLI))
        return true;
    }
  }

  return false;
}

/// At this point, we have learned that the only two values ever stored into GV
/// are its initializer and OtherVal. See if we can shrink the global into a
/// boolean and select between the two values whenever it is used. This exposes
/// the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  Type *GVElType = GV->getValueType();

  // If GVElType is already i1, it is already shrunk. If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a select
  // between them is very expensive and unlikely to lead to later
  // simplification. In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global to see if all the uses are loads or
  // stores. If there is anything else, bail out.
  for (User *U : GV->users())
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;

  LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::getFalse(GV->getContext()),
                                             GV->getName()+".b",
                                             GV->getThreadLocalMode(),
                                             GV->getType()->getAddressSpace());
  NewGV->copyAttributesFrom(GV);
  GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
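  // As an illustrative sketch (hypothetical global @g): if @g is an i32
  // global initialized to 0 whose only other stored value is 1, a load
  //    %v = load i32, i32* @g
  // can be rewritten after shrinking as
  //    %b = load i1, i1* @g.b
  //    %v = zext i1 %b to i32
  // with no select needed.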
  bool IsOneZero = false;
  bool EmitOneOrZero = true;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
    IsOneZero = InitVal->isNullValue() && CI->isOne();

    if (ConstantInt *CIInit = dyn_cast<ConstantInt>(GV->getInitializer())) {
      uint64_t ValInit = CIInit->getZExtValue();
      uint64_t ValOther = CI->getZExtValue();
      uint64_t ValMinus = ValOther - ValInit;

      for (auto *GVe : GVs) {
        DIGlobalVariable *DGV = GVe->getVariable();
        DIExpression *E = GVe->getExpression();

        // It is expected that the address of the optimized global variable is
        // on top of the stack. After optimization, the value of that variable
        // will be either 0 (the initial value) or 1 (the other value). The
        // following expression should return a constant integer value
        // depending on the value at the global object's address:
        //    val * (ValOther - ValInit) + ValInit:
        // DW_OP_deref DW_OP_constu <ValMinus>
        // DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
        SmallVector<uint64_t, 12> Ops = {
            dwarf::DW_OP_deref, dwarf::DW_OP_constu, ValMinus,
            dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
            dwarf::DW_OP_plus};
        E = DIExpression::prependOpcodes(E, Ops, DIExpression::WithStackValue);
        DIGlobalVariableExpression *DGVE =
            DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
        NewGV->addDebugInfo(DGVE);
      }
      EmitOneOrZero = false;
    }
  }

  if (EmitOneOrZero) {
    // FIXME: This will only emit the address for the debugger, and the value
    // written there will only ever be 0 or 1.
    for (auto *GV : GVs)
      NewGV->addDebugInfo(GV);
  }

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->user_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal) {
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      } else {
        // Otherwise, we are storing a previously loaded copy. To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction. If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                  LI->getOrdering(), LI->getSyncScopeID(), LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      new StoreInst(StoreVal, NewGV, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
    } else {
      // Change the load into a load of bool then a select.
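      // E.g. (illustrative): with an initializer of 10 and an other value of
      // 20, a load of the original global becomes
      //    %b = load i1, i1* @g.b
      //    %v = select i1 %b, i32 20, i32 10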
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
                                   LI->getOrdering(), LI->getSyncScopeID(), LI);
      Value *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  // Retain the name of the old global variable. People who are debugging their
  // programs may expect these variables to be named the same.
  NewGV->takeName(GV);
  GV->eraseFromParent();
  return true;
}

static bool deleteIfDead(
    GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  GV.removeDeadConstantUsers();

  if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
    return false;

  if (const Comdat *C = GV.getComdat())
    if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
      return false;

  bool Dead;
  if (auto *F = dyn_cast<Function>(&GV))
    Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
  else
    Dead = GV.use_empty();
  if (!Dead)
    return false;

  LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
  GV.eraseFromParent();
  ++NumDeleted;
  return true;
}

static bool isPointerValueDeadOnEntryToFunction(
    const Function *F, GlobalValue *GV,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  // Find all uses of GV. We expect them all to be in F, and if we can't
  // identify any of the uses we bail out.
  //
  // On each of these uses, identify if the memory that GV points to is
  // used/required/live at the start of the function. If it is not, for example
  // if the first thing the function does is store to the GV, the GV can
  // possibly be demoted.
  //
  // We don't do an exhaustive search for memory operations - simply look
  // through bitcasts as they're quite common and benign.
  const DataLayout &DL = GV->getParent()->getDataLayout();
  SmallVector<LoadInst *, 4> Loads;
  SmallVector<StoreInst *, 4> Stores;
  for (auto *U : GV->users()) {
    if (Operator::getOpcode(U) == Instruction::BitCast) {
      for (auto *UU : U->users()) {
        if (auto *LI = dyn_cast<LoadInst>(UU))
          Loads.push_back(LI);
        else if (auto *SI = dyn_cast<StoreInst>(UU))
          Stores.push_back(SI);
        else
          return false;
      }
      continue;
    }

    Instruction *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    assert(I->getParent()->getParent() == F);

    if (auto *LI = dyn_cast<LoadInst>(I))
      Loads.push_back(LI);
    else if (auto *SI = dyn_cast<StoreInst>(I))
      Stores.push_back(SI);
    else
      return false;
  }

  // We have identified all uses of GV into loads and stores. Now check if all
  // of them are known not to depend on the value of the global at the function
  // entry point. We do this by ensuring that every load is dominated by at
  // least one store.
  auto &DT = LookupDomTree(*const_cast<Function *>(F));

  // The below check is quadratic. Check we're not going to do too many tests.
  // FIXME: Even though this will always have worst-case quadratic time, we
  // could put effort into minimizing the average time by putting stores that
  // have been shown to dominate at least one load at the beginning of the
  // Stores array, making subsequent dominance checks more likely to succeed
  // early.
  //
  // The threshold here is fairly large because global->local demotion is a
  // very powerful optimization should it fire.
  const unsigned Threshold = 100;
  if (Loads.size() * Stores.size() > Threshold)
    return false;

  for (auto *L : Loads) {
    auto *LTy = L->getType();
    if (none_of(Stores, [&](const StoreInst *S) {
          auto *STy = S->getValueOperand()->getType();
          // The load is only dominated by the store if DomTree says so
          // and the number of bits loaded in L is less than or equal to
          // the number of bits stored in S.
          return DT.dominates(S, L) &&
                 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
        }))
      return false;
  }
  // All loads have known dependences inside F, so the global can be localized.
  return true;
}

/// C may have non-instruction users. Can all of those users be turned into
/// instructions?
static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
  // We don't do this exhaustively. The most common pattern that we really need
  // to care about is a constant GEP or constant bitcast - so we just look
  // through one single ConstantExpr.
  //
  // The set of constants that this function returns true for must be handled
  // by makeAllConstantUsesInstructions.
  for (auto *U : C->users()) {
    if (isa<Instruction>(U))
      continue;
    if (!isa<ConstantExpr>(U))
      // Non-instruction, non-constantexpr user; cannot convert this.
      return false;
    for (auto *UU : U->users())
      if (!isa<Instruction>(UU))
        // A constantexpr used by another constant. We don't try to recurse any
        // further but just bail out at this point.
        return false;
  }

  return true;
}

/// C may have non-instruction users, and
/// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
/// non-instruction users to instructions.
static void makeAllConstantUsesInstructions(Constant *C) {
  SmallVector<ConstantExpr*,4> Users;
  for (auto *U : C->users()) {
    if (isa<ConstantExpr>(U))
      Users.push_back(cast<ConstantExpr>(U));
    else
      // We should never get here; allNonInstructionUsersCanBeMadeInstructions
      // should not have returned true for C.
      assert(
          isa<Instruction>(U) &&
          "Can't transform non-constantexpr non-instruction to instruction!");
  }

  SmallVector<Value*,4> UUsers;
  for (auto *U : Users) {
    UUsers.clear();
    for (auto *UU : U->users())
      UUsers.push_back(UU);
    for (auto *UU : UUsers) {
      Instruction *UI = cast<Instruction>(UU);
      Instruction *NewU = U->getAsInstruction();
      NewU->insertBefore(UI);
      UI->replaceUsesOfWith(U, NewU);
    }
    // We've replaced all the uses, so destroy the constant. (destroyConstant
    // will update value handles and metadata.)
    U->destroyConstant();
  }
}

/// Analyze the specified global variable and optimize
/// it if possible. If we make a change, return true.
static bool processInternalGlobal(
    GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  auto &DL = GV->getParent()->getDataLayout();
  // If this is a first class global and has only one accessing function and
  // this function is non-recursive, we replace the global with a local alloca
  // in this function.
  //
  // NOTE: It doesn't make sense to promote non-single-value types since we
  // are just replacing static memory with stack memory.
  //
  // If the global is in a different address space, don't bring it to the
  // stack.
  if (!GS.HasMultipleAccessingFunctions &&
      GS.AccessingFunction &&
      GV->getValueType()->isSingleValueType() &&
      GV->getType()->getAddressSpace() == 0 &&
      !GV->isExternallyInitialized() &&
      allNonInstructionUsersCanBeMadeInstructions(GV) &&
      GS.AccessingFunction->doesNotRecurse() &&
      isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
                                          LookupDomTree)) {
    const DataLayout &DL = GV->getParent()->getDataLayout();

    LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
    Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                        ->getEntryBlock().begin());
    Type *ElemTy = GV->getValueType();
    // FIXME: Pass Global's alignment when globals have alignment
    AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
                                        GV->getName(), &FirstI);
    if (!isa<UndefValue>(GV->getInitializer()))
      new StoreInst(GV->getInitializer(), Alloca, &FirstI);

    makeAllConstantUsesInstructions(GV);

    GV->replaceAllUsesWith(Alloca);
    GV->eraseFromParent();
    ++NumLocalized;
    return true;
  }

  // If the global is never loaded (but may be stored to), it is dead.
  // Delete it now.
  if (!GS.IsLoaded) {
    LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");

    bool Changed;
    if (isLeakCheckerRoot(GV)) {
      // Delete any constant stores to the global.
      Changed = CleanupPointerRootUsers(GV, TLI);
    } else {
      // Delete any stores we can find to the global. We may not be able to
      // make it completely dead though.
      Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
    }

    // If the global is dead now, delete it.
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
    return Changed;

  }
  if (GS.StoredType <= GlobalStatus::InitializerStored) {
    LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
    GV->setConstant(true);

    // Clean up any obviously simplifiable users now.
    CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);

    // If the global is dead now, just nuke it.
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
                        << "all users and delete global!\n");
      GV->eraseFromParent();
      ++NumDeleted;
      return true;
    }

    // Fall through to the next check; see if we can optimize further.
    ++NumMarked;
  }
  if (!GV->getInitializer()->getType()->isSingleValueType()) {
    const DataLayout &DL = GV->getParent()->getDataLayout();
    if (SRAGlobal(GV, DL))
      return true;
  }
  if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
    // If the initial value for the global was an undef value, and if only
    // one other value was stored into it, we can just change the
    // initializer to be the stored value, then delete all stores to the
    // global. This allows us to mark it constant.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
      if (isa<UndefValue>(GV->getInitializer())) {
        // Change the initial value here.
        GV->setInitializer(SOVConstant);

        // Clean up any obviously simplifiable users now.
        CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);

        if (GV->use_empty()) {
          LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
                            << "simplify all users and delete global!\n");
          GV->eraseFromParent();
          ++NumDeleted;
        }
        ++NumSubstitute;
        return true;
      }

    // Try to optimize globals based on the knowledge that only one value
    // (besides its initializer) is ever stored to the global.
    if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI))
      return true;

    // Otherwise, if the global was not a boolean, we can shrink it to be a
    // boolean.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
      if (GS.Ordering == AtomicOrdering::NotAtomic) {
        if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
          ++NumShrunkToBool;
          return true;
        }
      }
    }
  }

  return false;
}

/// Analyze the specified global variable and optimize it if possible. If we
/// make a change, return true.
static bool
processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI,
              function_ref<DominatorTree &(Function &)> LookupDomTree) {
  if (GV.getName().startswith("llvm."))
    return false;

  GlobalStatus GS;

  if (GlobalStatus::analyzeGlobal(&GV, GS))
    return false;

  bool Changed = false;
  if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
    auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
                                               : GlobalValue::UnnamedAddr::Local;
    if (NewUnnamedAddr != GV.getUnnamedAddr()) {
      GV.setUnnamedAddr(NewUnnamedAddr);
      NumUnnamed++;
      Changed = true;
    }
  }

  // Do more involved optimizations if the global is internal.
  if (!GV.hasLocalLinkage())
    return Changed;

  auto *GVar = dyn_cast<GlobalVariable>(&GV);
  if (!GVar)
    return Changed;

  if (GVar->isConstant() || !GVar->hasInitializer())
    return Changed;

  return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed;
}

/// Walk all of the direct calls of the specified function, changing them to
/// FastCC.
static void ChangeCalleesToFastCall(Function *F) {
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallSite CS(cast<Instruction>(U));
    CS.setCallingConv(CallingConv::Fast);
  }
}

static AttributeList StripNest(LLVMContext &C, AttributeList Attrs) {
  // There can be at most one attribute set with a nest attribute.
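  // For example (illustrative IR), stripping nest turns
  //    declare i32 @f(i8* nest %env, i32 %x)
  // into
  //    declare i32 @f(i8* %env, i32 %x)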
  unsigned NestIndex;
  if (Attrs.hasAttrSomewhere(Attribute::Nest, &NestIndex))
    return Attrs.removeAttribute(C, NestIndex, Attribute::Nest);
  return Attrs;
}

static void RemoveNestAttribute(Function *F) {
  F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallSite CS(cast<Instruction>(U));
    CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
  }
}

/// Return true if this is a calling convention that we'd like to change. The
/// idea here is that we don't want to mess with the convention if the user
/// explicitly requested something with performance implications like coldcc,
/// GHC, or anyregcc.
static bool hasChangeableCC(Function *F) {
  CallingConv::ID CC = F->getCallingConv();

  // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
  if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
    return false;

  // FIXME: Change CC for the whole chain of musttail calls when possible.
  //
  // Can't change the CC of a function that either has musttail calls or is
  // itself a musttail callee.
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallInst* CI = dyn_cast<CallInst>(U);
    if (!CI)
      continue;

    if (CI->isMustTailCall())
      return false;
  }

  for (BasicBlock &BB : *F)
    if (BB.getTerminatingMustTailCall())
      return false;

  return true;
}

/// Return true if the block containing the call site has a BlockFrequency of
/// less than ColdCCRelFreq% of the entry block.
static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
  const BranchProbability ColdProb(ColdCCRelFreq, 100);
  auto CallSiteBB = CS.getInstruction()->getParent();
  auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}

// This function checks if the input function F is cold at all call sites. It
// also looks at each call site's containing function, returning false if the
// caller function contains other non-cold calls. The input vector AllCallsCold
// contains a list of functions that only have call sites in cold blocks.
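// As a sketch of the intent: if @callee is only ever called from a block
// that BFI shows is rarely executed relative to its caller's entry, and
// each such caller makes no other non-cold calls, @callee is a candidate.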
static bool
isValidCandidateForColdCC(Function &F,
                          function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
                          const std::vector<Function *> &AllCallsCold) {

  if (F.user_empty())
    return false;

  for (User *U : F.users()) {
    if (isa<BlockAddress>(U))
      continue;

    CallSite CS(cast<Instruction>(U));
    Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
    BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
    if (!isColdCallSite(CS, CallerBFI))
      return false;
    auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
    if (It == AllCallsCold.end())
      return false;
  }
  return true;
}

static void changeCallSitesToColdCC(Function *F) {
  for (User *U : F->users()) {
    if (isa<BlockAddress>(U))
      continue;
    CallSite CS(cast<Instruction>(U));
    CS.setCallingConv(CallingConv::Cold);
  }
}

// This function iterates over all the call instructions in the input Function
// and checks that all call sites are in cold blocks and are allowed to use the
// coldcc calling convention.
static bool
hasOnlyColdCalls(Function &F,
                 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        CallSite CS(cast<Instruction>(CI));
        // Skip over inline asm instructions since they aren't function calls.
        if (CI->isInlineAsm())
          continue;
        Function *CalledFn = CI->getCalledFunction();
        if (!CalledFn)
          return false;
        if (!CalledFn->hasLocalLinkage())
          return false;
        // Skip over intrinsics since they won't remain as function calls.
        if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
          continue;
        // Check if it's valid to use coldcc calling convention.
        if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
            CalledFn->hasAddressTaken())
          return false;
        BlockFrequencyInfo &CallerBFI = GetBFI(F);
        if (!isColdCallSite(CS, CallerBFI))
          return false;
      }
    }
  }
  return true;
}

static bool
OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
                  function_ref<TargetTransformInfo &(Function &)> GetTTI,
                  function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
                  function_ref<DominatorTree &(Function &)> LookupDomTree,
                  SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {

  bool Changed = false;

  std::vector<Function *> AllCallsCold;
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
    Function *F = &*FI++;
    if (hasOnlyColdCalls(*F, GetBFI))
      AllCallsCold.push_back(F);
  }

  // Optimize functions.
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
    Function *F = &*FI++;

    // Don't perform global opt pass on naked functions; we don't want fast
    // calling conventions for naked functions.
    if (F->hasFnAttribute(Attribute::Naked))
      continue;

    // Functions without names cannot be referenced outside this module.
    if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
      F->setLinkage(GlobalValue::InternalLinkage);

    if (deleteIfDead(*F, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    // LLVM's definition of dominance allows instructions that are cyclic
    // in unreachable blocks, e.g.:
    //    %pat = select i1 %condition, @global, i16* %pat
    // because any instruction dominates an instruction in a block that's
    // not reachable from entry.
    // So, remove unreachable blocks from the function, because a) there's
    // no point in analyzing them and b) GlobalOpt should otherwise grow
    // some more complicated logic to break these cycles.
    // Removing unreachable blocks might invalidate the dominator tree, so we
    // recalculate it.
    if (!F->isDeclaration()) {
      if (removeUnreachableBlocks(*F)) {
        auto &DT = LookupDomTree(*F);
        DT.recalculate(*F);
        Changed = true;
      }
    }

    Changed |= processGlobal(*F, TLI, LookupDomTree);

    if (!F->hasLocalLinkage())
      continue;

    if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
      NumInternalFunc++;
      TargetTransformInfo &TTI = GetTTI(*F);
      // Change the calling convention to coldcc if either stress testing is
      // enabled or the target would like to use coldcc on functions which are
      // cold at all call sites and the callers contain no other non-coldcc
      // calls.
      if (EnableColdCCStressTest ||
          (isValidCandidateForColdCC(*F, GetBFI, AllCallsCold) &&
           TTI.useColdCCForColdCall(*F))) {
        F->setCallingConv(CallingConv::Cold);
        changeCallSitesToColdCC(F);
        Changed = true;
        NumColdCC++;
      }
    }

    if (hasChangeableCC(F) && !F->isVarArg() &&
        !F->hasAddressTaken()) {
      // If this function has a calling convention worth changing, is not a
      // varargs function, and is only called directly, promote it to use the
      // Fast calling convention.
      F->setCallingConv(CallingConv::Fast);
      ChangeCalleesToFastCall(F);
      ++NumFastCallFns;
      Changed = true;
    }

    if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
        !F->hasAddressTaken()) {
      // The function is not used by a trampoline intrinsic, so it is safe
      // to remove the 'nest' attribute.
      RemoveNestAttribute(F);
      ++NumNestRemoved;
      Changed = true;
    }
  }
  return Changed;
}

static bool
OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI,
                   function_ref<DominatorTree &(Function &)> LookupDomTree,
                   SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  bool Changed = false;

  for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
       GVI != E; ) {
    GlobalVariable *GV = &*GVI++;
    // Global variables without names cannot be referenced outside this module.
    if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
      GV->setLinkage(GlobalValue::InternalLinkage);
    // Simplify the initializer.
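    // E.g. (an illustrative case), an initializer written as the constant
    // expression
    //    i32 add (i32 2, i32 3)
    // would be folded here to the plain constant i32 5.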
    if (GV->hasInitializer())
      if (auto *C = dyn_cast<Constant>(GV->getInitializer())) {
        auto &DL = M.getDataLayout();
        Constant *New = ConstantFoldConstant(C, DL, TLI);
        if (New && New != C)
          GV->setInitializer(New);
      }

    if (deleteIfDead(*GV, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    Changed |= processGlobal(*GV, TLI, LookupDomTree);
  }
  return Changed;
}

/// Evaluate a piece of a constantexpr store into a global initializer. This
/// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
/// GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                   ConstantExpr *Addr, unsigned OpNo) {
  // Base case of the recursion.
  if (OpNo == Addr->getNumOperands()) {
    assert(Val->getType() == Init->getType() && "Type mismatch!");
    return Val;
  }

  SmallVector<Constant*, 32> Elts;
  if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
    // Break up the constant into its elements.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      Elts.push_back(Init->getAggregateElement(i));

    // Replace the element that we are supposed to.
    ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
    unsigned Idx = CU->getZExtValue();
    assert(Idx < STy->getNumElements() && "Struct index out of range!");
    Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);

    // Return the modified struct.
    return ConstantStruct::get(STy, Elts);
  }

  ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
  SequentialType *InitTy = cast<SequentialType>(Init->getType());
  uint64_t NumElts = InitTy->getNumElements();

  // Break up the array into elements.
  for (uint64_t i = 0, e = NumElts; i != e; ++i)
    Elts.push_back(Init->getAggregateElement(i));

  assert(CI->getZExtValue() < NumElts);
  Elts[CI->getZExtValue()] =
      EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);

  if (Init->getType()->isArrayTy())
    return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
  return ConstantVector::get(Elts);
}

/// We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    assert(GV->hasInitializer());
    GV->setInitializer(Val);
    return;
  }

  ConstantExpr *CE = cast<ConstantExpr>(Addr);
  GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
  GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}

/// Given a map of address -> value, where addresses are expected to be some form
/// of either a global or a constant GEP, set the initializer for the address to
/// be the value. This performs mostly the same function as CommitValueTo()
/// and EvaluateStoreInto() but is optimized to be more efficient for the common
/// case where the set of addresses are GEPs sharing the same underlying global,
/// processing the GEPs in batches rather than individually.
///
/// To give an example, consider the following C++ code adapted from the clang
/// regression tests:
///   struct S {
///     int n = 10;
///     int m = 2 * n;
///     S(int a) : n(a) {}
///   };
///
///   template<typename T>
///   struct U {
///     T *r = &q;
///     T q = 42;
///     U *p = this;
///   };
///
///   U<S> e;
///
/// The global static constructor for 'e' will need to initialize 'r' and 'p' of
/// the outer struct, while also initializing the inner 'q' struct's 'n' and 'm'
/// members. This batch algorithm will simply use the general CommitValueTo()
/// method to handle the complex nested S struct initialization of 'q', before
/// processing the outermost members in a single batch. Using CommitValueTo()
/// to handle each member of the outer struct is inefficient when the
/// struct/array is very large, as we end up creating and destroying constant
/// arrays for each initialization.
/// For the above case, we expect the following IR to be generated:
///
///   %struct.U = type { %struct.S*, %struct.S, %struct.U* }
///   %struct.S = type { i32, i32 }
///   @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
///                                                    i64 0, i32 1),
///                           %struct.S { i32 42, i32 84 }, %struct.U* @e }
/// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
/// constant expression, while the other two elements of @e are "simple".
static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
  SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
  SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
  SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
  SimpleCEs.reserve(Mem.size());

  for (const auto &I : Mem) {
    if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
      GVs.push_back(std::make_pair(GV, I.second));
    } else {
      ConstantExpr *GEP = cast<ConstantExpr>(I.first);
      // We don't handle the deeply recursive case using the batch method.
      if (GEP->getNumOperands() > 3)
        ComplexCEs.push_back(std::make_pair(GEP, I.second));
      else
        SimpleCEs.push_back(std::make_pair(GEP, I.second));
    }
  }

  // The algorithm below doesn't handle cases like nested structs, so use the
  // slower fully general method if we have to.
  for (auto ComplexCE : ComplexCEs)
    CommitValueTo(ComplexCE.second, ComplexCE.first);

  for (auto GVPair : GVs) {
    assert(GVPair.first->hasInitializer());
    GVPair.first->setInitializer(GVPair.second);
  }

  if (SimpleCEs.empty())
    return;

  // We cache a single global's initializer elements in the case where the
  // subsequent address/val pair uses the same one. This avoids throwing away
  // and rebuilding the constant struct/vector/array just because one element
  // is modified at a time.
  SmallVector<Constant *, 32> Elts;
  Elts.reserve(SimpleCEs.size());
  GlobalVariable *CurrentGV = nullptr;

  auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
    Constant *Init = GV->getInitializer();
    Type *Ty = Init->getType();
    if (Update) {
      if (CurrentGV) {
        assert(CurrentGV && "Expected a GV to commit to!");
        Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
        // We have a valid cache that needs to be committed.
        if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
          CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
        else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
          CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
        else
          CurrentGV->setInitializer(ConstantVector::get(Elts));
      }
      if (CurrentGV == GV)
        return;
      // Need to clear and set up cache for new initializer.
      CurrentGV = GV;
      Elts.clear();
      unsigned NumElts;
      if (auto *STy = dyn_cast<StructType>(Ty))
        NumElts = STy->getNumElements();
      else
        NumElts = cast<SequentialType>(Ty)->getNumElements();
      for (unsigned i = 0, e = NumElts; i != e; ++i)
        Elts.push_back(Init->getAggregateElement(i));
    }
  };

  for (auto CEPair : SimpleCEs) {
    ConstantExpr *GEP = CEPair.first;
    Constant *Val = CEPair.second;

    GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
    commitAndSetupCache(GV, GV != CurrentGV);
    ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
    Elts[CI->getZExtValue()] = Val;
  }
  // The last initializer in the list needs to be committed, others
  // will be committed on a new initializer being processed.
  commitAndSetupCache(CurrentGV, true);
}

/// Evaluate static constructors in the function, if we can. Return true if we
/// can, false otherwise.
static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
                                      TargetLibraryInfo *TLI) {
  // Call the function.
  Evaluator Eval(DL, TLI);
  Constant *RetValDummy;
  bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
                                           SmallVector<Constant*, 0>());

  if (EvalSuccess) {
    ++NumCtorsEvaluated;

    // We succeeded at evaluation: commit the result.
    LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
                      << F->getName() << "' to "
                      << Eval.getMutatedMemory().size() << " stores.\n");
    BatchCommitValueTo(Eval.getMutatedMemory());
    for (GlobalVariable *GV : Eval.getInvariants())
      GV->setConstant(true);
  }

  return EvalSuccess;
}

static int compareNames(Constant *const *A, Constant *const *B) {
  Value *AStripped = (*A)->stripPointerCastsNoFollowAliases();
  Value *BStripped = (*B)->stripPointerCastsNoFollowAliases();
  return AStripped->getName().compare(BStripped->getName());
}

static void setUsedInitializer(GlobalVariable &V,
                               const SmallPtrSetImpl<GlobalValue *> &Init) {
  if (Init.empty()) {
    V.eraseFromParent();
    return;
  }

  // Type of pointer to the array of pointers.
  PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);

  SmallVector<Constant *, 8> UsedArray;
  for (GlobalValue *GV : Init) {
    Constant *Cast
        = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
    UsedArray.push_back(Cast);
  }
  // Sort to get deterministic order.
  array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
  ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());

  Module *M = V.getParent();
  V.removeFromParent();
  GlobalVariable *NV =
      new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
                         ConstantArray::get(ATy, UsedArray), "");
  NV->takeName(&V);
  NV->setSection("llvm.metadata");
  delete &V;
}

namespace {

/// An easy-to-access representation of llvm.used and llvm.compiler.used.
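/// For reference, an illustrative example (not from this file) of what these
/// lists look like in IR:
///   @llvm.used = appending global [1 x i8*]
///       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"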
class LLVMUsed {
  SmallPtrSet<GlobalValue *, 8> Used;
  SmallPtrSet<GlobalValue *, 8> CompilerUsed;
  GlobalVariable *UsedV;
  GlobalVariable *CompilerUsedV;

public:
  LLVMUsed(Module &M) {
    UsedV = collectUsedGlobalVariables(M, Used, false);
    CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
  }

  using iterator = SmallPtrSet<GlobalValue *, 8>::iterator;
  using used_iterator_range = iterator_range<iterator>;

  iterator usedBegin() { return Used.begin(); }
  iterator usedEnd() { return Used.end(); }

  used_iterator_range used() {
    return used_iterator_range(usedBegin(), usedEnd());
  }

  iterator compilerUsedBegin() { return CompilerUsed.begin(); }
  iterator compilerUsedEnd() { return CompilerUsed.end(); }

  used_iterator_range compilerUsed() {
    return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
  }

  bool usedCount(GlobalValue *GV) const { return Used.count(GV); }

  bool compilerUsedCount(GlobalValue *GV) const {
    return CompilerUsed.count(GV);
  }

  bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
  bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
  bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }

  bool compilerUsedInsert(GlobalValue *GV) {
    return CompilerUsed.insert(GV).second;
  }

  void syncVariablesAndSets() {
    if (UsedV)
      setUsedInitializer(*UsedV, Used);
    if (CompilerUsedV)
      setUsedInitializer(*CompilerUsedV, CompilerUsed);
  }
};

} // end anonymous namespace

static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
  if (GA.use_empty()) // No use at all.
    return false;

  assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");
  if (!GA.hasOneUse())
    // Strictly more than one use. So at least one is not in llvm.used and
    // llvm.compiler.used.
    return true;

  // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
  return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
}

static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
                                               const LLVMUsed &U) {
  unsigned N = 2;
  assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");
  if (U.usedCount(&V) || U.compilerUsedCount(&V))
    ++N;
  return V.hasNUsesOrMore(N);
}

static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
  if (!GA.hasLocalLinkage())
    return true;

  return U.usedCount(&GA) || U.compilerUsedCount(&GA);
}

static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
                             bool &RenameTarget) {
  RenameTarget = false;
  bool Ret = false;
  if (hasUseOtherThanLLVMUsed(GA, U))
    Ret = true;

  // If the alias is externally visible, we may still be able to simplify it.
  if (!mayHaveOtherReferences(GA, U))
    return Ret;

  // If the aliasee has internal linkage, give it the name and linkage
  // of the alias, and delete the alias. This turns:
  //   define internal ... @f(...)
  //   @a = alias ... @f
  // into:
  //   define ... @a(...)
  Constant *Aliasee = GA.getAliasee();
  GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
  if (!Target->hasLocalLinkage())
    return Ret;

  // Do not perform the transform if multiple aliases potentially target the
  // aliasee. This check also ensures that it is safe to replace the section
  // and other attributes of the aliasee with those of the alias.
  if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
    return Ret;

  RenameTarget = true;
  return true;
}

static bool
OptimizeGlobalAliases(Module &M,
                      SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
  bool Changed = false;
  LLVMUsed Used(M);

  for (GlobalValue *GV : Used.used())
    Used.compilerUsedErase(GV);

  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    GlobalAlias *J = &*I++;

    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
      J->setLinkage(GlobalValue::InternalLinkage);

    if (deleteIfDead(*J, NotDiscardableComdats)) {
      Changed = true;
      continue;
    }

    // If the alias can change at link time, nothing can be done - bail out.
    if (J->isInterposable())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
    // We can't trivially replace the alias with the aliasee if the aliasee is
    // non-trivial in some way.
    // TODO: Try to handle non-zero GEPs of local aliasees.
    if (!Target)
      continue;
    Target->removeDeadConstantUsers();

    // Make all users of the alias use the aliasee instead.
    bool RenameTarget;
    if (!hasUsesToReplace(*J, Used, RenameTarget))
      continue;

    J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
    ++NumAliasesResolved;
    Changed = true;

    if (RenameTarget) {
      // Give the aliasee the name, linkage and other attributes of the alias.
      Target->takeName(&*J);
      Target->setLinkage(J->getLinkage());
      Target->setDSOLocal(J->isDSOLocal());
      Target->setVisibility(J->getVisibility());
      Target->setDLLStorageClass(J->getDLLStorageClass());

      if (Used.usedErase(&*J))
        Used.usedInsert(Target);

      if (Used.compilerUsedErase(&*J))
        Used.compilerUsedInsert(Target);
    } else if (mayHaveOtherReferences(*J, Used))
      continue;

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  Used.syncVariablesAndSets();

  return Changed;
}

static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
  LibFunc F = LibFunc_cxa_atexit;
  if (!TLI->has(F))
    return nullptr;

  Function *Fn = M.getFunction(TLI->getName(F));
  if (!Fn)
    return nullptr;

  // Make sure that the function has the correct prototype.
  if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit)
    return nullptr;

  return Fn;
}

/// Returns whether the given function is an empty C++ destructor and can
/// therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code so we only look for a function with a single basic block, where
/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
/// other side-effect free instructions.
static bool cxxDtorIsEmpty(const Function &Fn,
                           SmallPtrSet<const Function *, 8> &CalledFunctions) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  if (++Fn.begin() != Fn.end())
    return false;

  const BasicBlock &EntryBlock = Fn.getEntryBlock();
  for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
       I != E; ++I) {
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      // Ignore debug intrinsics.
      if (isa<DbgInfoIntrinsic>(CI))
        continue;

      const Function *CalledFn = CI->getCalledFunction();

      if (!CalledFn)
        return false;

      SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);

      // Don't treat recursive functions as empty.
      if (!NewCalledFunctions.insert(CalledFn).second)
        return false;

      if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
        return false;
    } else if (isa<ReturnInst>(*I))
      return true; // We're done.
    else if (I->mayHaveSideEffects())
      return false; // Destructor with side effects, bail.
  }

  return false;
}

static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  ///   After constructing a global (or local static) object, that will require
  ///   destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  ///   call f(p) when DSO d is unloaded, before all such termination calls
  ///   registered before this one. It returns zero if registration is
  ///   successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the function is trivial
  // and remove them.
  bool Changed = false;

  for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
       I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
        dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call.
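    // (Per the ABI text quoted above, __cxa_atexit returns zero on success,
    // so replacing the call's uses with a null value of its return type
    // models a successful registration.)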
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed |= true;
  }

  return Changed;
}

static bool optimizeGlobalsInModule(
    Module &M, const DataLayout &DL, TargetLibraryInfo *TLI,
    function_ref<TargetTransformInfo &(Function &)> GetTTI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    function_ref<DominatorTree &(Function &)> LookupDomTree) {
  SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
  bool Changed = false;
  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    NotDiscardableComdats.clear();
    for (const GlobalVariable &GV : M.globals())
      if (const Comdat *C = GV.getComdat())
        if (!GV.isDiscardableIfUnused() || !GV.use_empty())
          NotDiscardableComdats.insert(C);
    for (Function &F : M)
      if (const Comdat *C = F.getComdat())
        if (!F.isDefTriviallyDead())
          NotDiscardableComdats.insert(C);
    for (GlobalAlias &GA : M.aliases())
      if (const Comdat *C = GA.getComdat())
        if (!GA.isDiscardableIfUnused() || !GA.use_empty())
          NotDiscardableComdats.insert(C);

    // Delete functions that are trivially dead, ccc -> fastcc
    LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree,
                                     NotDiscardableComdats);

    // Optimize global_ctors list.
    LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
      return EvaluateStaticConstructor(F, DL, TLI);
    });

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree,
                                      NotDiscardableComdats);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);

    // Try to remove trivial global destructors if they are not removed
    // already.
    Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}

PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &DL = M.getDataLayout();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(M);
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };
  auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };

  auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
    return FAM.getResult<BlockFrequencyAnalysis>(F);
  };

  if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree))
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}

namespace {

struct GlobalOptLegacyPass : public ModulePass {
  static char ID; // Pass identification, replacement for typeid

  GlobalOptLegacyPass() : ModulePass(ID) {
    initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    auto &DL = M.getDataLayout();
    auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto LookupDomTree = [this](Function &F) -> DominatorTree & {
      return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
    };
    auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
      return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    };

    auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
      return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
    };

    return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<BlockFrequencyInfoWrapperPass>();
  }
};

} // end anonymous namespace

char GlobalOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() {
  return new GlobalOptLegacyPass();
}