1 //===- GlobalOpt.cpp - Optimize Global Variables --------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass transforms simple global variables that never have their address 11 // taken. If obviously true, it marks read/write globals as constant, deletes 12 // variables only stored to, etc. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #define DEBUG_TYPE "globalopt" 17 #include "llvm/Transforms/IPO.h" 18 #include "llvm/ADT/DenseMap.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/ConstantFolding.h" 24 #include "llvm/Analysis/MemoryBuiltins.h" 25 #include "llvm/IR/CallingConv.h" 26 #include "llvm/IR/Constants.h" 27 #include "llvm/IR/DataLayout.h" 28 #include "llvm/IR/DerivedTypes.h" 29 #include "llvm/IR/Instructions.h" 30 #include "llvm/IR/IntrinsicInst.h" 31 #include "llvm/IR/Module.h" 32 #include "llvm/IR/Operator.h" 33 #include "llvm/Pass.h" 34 #include "llvm/Support/CallSite.h" 35 #include "llvm/Support/Debug.h" 36 #include "llvm/Support/ErrorHandling.h" 37 #include "llvm/Support/GetElementPtrTypeIterator.h" 38 #include "llvm/Support/MathExtras.h" 39 #include "llvm/Support/ValueHandle.h" 40 #include "llvm/Support/raw_ostream.h" 41 #include "llvm/Target/TargetLibraryInfo.h" 42 #include "llvm/Transforms/Utils/GlobalStatus.h" 43 #include "llvm/Transforms/Utils/ModuleUtils.h" 44 #include <algorithm> 45 using namespace llvm; 46 47 STATISTIC(NumMarked , "Number of globals marked constant"); 48 STATISTIC(NumUnnamed , "Number of globals marked unnamed_addr"); 49 STATISTIC(NumSRA , "Number of aggregate globals broken into scalars"); 50 STATISTIC(NumHeapSRA , "Number of heap objects SRA'd"); 51 STATISTIC(NumSubstitute,"Number of globals with initializers stored into them"); 52 STATISTIC(NumDeleted , "Number of globals deleted"); 53 STATISTIC(NumFnDeleted , "Number of functions deleted"); 54 STATISTIC(NumGlobUses , "Number of global uses devirtualized"); 55 STATISTIC(NumLocalized , "Number of globals localized"); 56 STATISTIC(NumShrunkToBool , "Number of global vars shrunk to booleans"); 57 STATISTIC(NumFastCallFns , "Number of functions converted to fastcc"); 58 STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated"); 59 STATISTIC(NumNestRemoved , "Number of nest attributes removed"); 60 STATISTIC(NumAliasesResolved, "Number of global aliases resolved"); 61 STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated"); 62 STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed"); 63 64 namespace { 65 struct GlobalOpt : public ModulePass { 66 virtual void getAnalysisUsage(AnalysisUsage &AU) const { 67 AU.addRequired<TargetLibraryInfo>(); 68 } 69 static char ID; // Pass identification, replacement for typeid 70 GlobalOpt() : ModulePass(ID) { 71 initializeGlobalOptPass(*PassRegistry::getPassRegistry()); 72 } 73 74 bool runOnModule(Module &M); 75 76 private: 77 GlobalVariable *FindGlobalCtors(Module &M); 78 bool OptimizeFunctions(Module &M); 79 bool OptimizeGlobalVars(Module &M); 80 bool OptimizeGlobalAliases(Module &M); 81 bool OptimizeGlobalCtorsList(GlobalVariable *&GCL); 82 bool ProcessGlobal(GlobalVariable *GV,Module::global_iterator &GVI); 83 bool ProcessInternalGlobal(GlobalVariable *GV,Module::global_iterator &GVI, 84 const GlobalStatus &GS); 85 bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn); 86 87 const DataLayout *DL; 88 TargetLibraryInfo *TLI; 89 }; 90 } 91 92 char GlobalOpt::ID = 0; 93 INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt", 94 "Global Variable Optimizer", false, false) 95 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) 96 INITIALIZE_PASS_END(GlobalOpt, "globalopt", 97 "Global Variable Optimizer", false, false) 98 99 ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); } 100 101 /// isLeakCheckerRoot - Is this global variable possibly used by a leak checker 102 /// as a root? If so, we might not really want to eliminate the stores to it. 103 static bool isLeakCheckerRoot(GlobalVariable *GV) { 104 // A global variable is a root if it is a pointer, or could plausibly contain 105 // a pointer. There are two challenges; one is that we could have a struct 106 // the has an inner member which is a pointer. We recurse through the type to 107 // detect these (up to a point). The other is that we may actually be a union 108 // of a pointer and another type, and so our LLVM type is an integer which 109 // gets converted into a pointer, or our type is an [i8 x #] with a pointer 110 // potentially contained here. 111 112 if (GV->hasPrivateLinkage()) 113 return false; 114 115 SmallVector<Type *, 4> Types; 116 Types.push_back(cast<PointerType>(GV->getType())->getElementType()); 117 118 unsigned Limit = 20; 119 do { 120 Type *Ty = Types.pop_back_val(); 121 switch (Ty->getTypeID()) { 122 default: break; 123 case Type::PointerTyID: return true; 124 case Type::ArrayTyID: 125 case Type::VectorTyID: { 126 SequentialType *STy = cast<SequentialType>(Ty); 127 Types.push_back(STy->getElementType()); 128 break; 129 } 130 case Type::StructTyID: { 131 StructType *STy = cast<StructType>(Ty); 132 if (STy->isOpaque()) return true; 133 for (StructType::element_iterator I = STy->element_begin(), 134 E = STy->element_end(); I != E; ++I) { 135 Type *InnerTy = *I; 136 if (isa<PointerType>(InnerTy)) return true; 137 if (isa<CompositeType>(InnerTy)) 138 Types.push_back(InnerTy); 139 } 140 break; 141 } 142 } 143 if (--Limit == 0) return true; 144 } while (!Types.empty()); 145 return false; 146 } 147 148 /// Given a value that is stored to a global but never read, determine whether 149 /// it's safe to remove the store and the chain of computation that feeds the 150 /// store. 151 static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { 152 do { 153 if (isa<Constant>(V)) 154 return true; 155 if (!V->hasOneUse()) 156 return false; 157 if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || 158 isa<GlobalValue>(V)) 159 return false; 160 if (isAllocationFn(V, TLI)) 161 return true; 162 163 Instruction *I = cast<Instruction>(V); 164 if (I->mayHaveSideEffects()) 165 return false; 166 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) { 167 if (!GEP->hasAllConstantIndices()) 168 return false; 169 } else if (I->getNumOperands() != 1) { 170 return false; 171 } 172 173 V = I->getOperand(0); 174 } while (1); 175 } 176 177 /// CleanupPointerRootUsers - This GV is a pointer root. Loop over all users 178 /// of the global and clean up any that obviously don't assign the global a 179 /// value that isn't dynamically allocated. 180 /// 181 static bool CleanupPointerRootUsers(GlobalVariable *GV, 182 const TargetLibraryInfo *TLI) { 183 // A brief explanation of leak checkers. The goal is to find bugs where 184 // pointers are forgotten, causing an accumulating growth in memory 185 // usage over time. The common strategy for leak checkers is to whitelist the 186 // memory pointed to by globals at exit. This is popular because it also 187 // solves another problem where the main thread of a C++ program may shut down 188 // before other threads that are still expecting to use those globals. To 189 // handle that case, we expect the program may create a singleton and never 190 // destroy it. 191 192 bool Changed = false; 193 194 // If Dead[n].first is the only use of a malloc result, we can delete its 195 // chain of computation and the store to the global in Dead[n].second. 196 SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead; 197 198 // Constants can't be pointers to dynamically allocated memory. 199 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); 200 UI != E;) { 201 User *U = *UI++; 202 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 203 Value *V = SI->getValueOperand(); 204 if (isa<Constant>(V)) { 205 Changed = true; 206 SI->eraseFromParent(); 207 } else if (Instruction *I = dyn_cast<Instruction>(V)) { 208 if (I->hasOneUse()) 209 Dead.push_back(std::make_pair(I, SI)); 210 } 211 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) { 212 if (isa<Constant>(MSI->getValue())) { 213 Changed = true; 214 MSI->eraseFromParent(); 215 } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) { 216 if (I->hasOneUse()) 217 Dead.push_back(std::make_pair(I, MSI)); 218 } 219 } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) { 220 GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource()); 221 if (MemSrc && MemSrc->isConstant()) { 222 Changed = true; 223 MTI->eraseFromParent(); 224 } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) { 225 if (I->hasOneUse()) 226 Dead.push_back(std::make_pair(I, MTI)); 227 } 228 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 229 if (CE->use_empty()) { 230 CE->destroyConstant(); 231 Changed = true; 232 } 233 } else if (Constant *C = dyn_cast<Constant>(U)) { 234 if (isSafeToDestroyConstant(C)) { 235 C->destroyConstant(); 236 // This could have invalidated UI, start over from scratch. 237 Dead.clear(); 238 CleanupPointerRootUsers(GV, TLI); 239 return true; 240 } 241 } 242 } 243 244 for (int i = 0, e = Dead.size(); i != e; ++i) { 245 if (IsSafeComputationToRemove(Dead[i].first, TLI)) { 246 Dead[i].second->eraseFromParent(); 247 Instruction *I = Dead[i].first; 248 do { 249 if (isAllocationFn(I, TLI)) 250 break; 251 Instruction *J = dyn_cast<Instruction>(I->getOperand(0)); 252 if (!J) 253 break; 254 I->eraseFromParent(); 255 I = J; 256 } while (1); 257 I->eraseFromParent(); 258 } 259 } 260 261 return Changed; 262 } 263 264 /// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all 265 /// users of the global, cleaning up the obvious ones. This is largely just a 266 /// quick scan over the use list to clean up the easy and obvious cruft. This 267 /// returns true if it made a change. 268 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, 269 const DataLayout *DL, 270 TargetLibraryInfo *TLI) { 271 bool Changed = false; 272 // Note that we need to use a weak value handle for the worklist items. When 273 // we delete a constant array, we may also be holding pointer to one of its 274 // elements (or an element of one of its elements if we're dealing with an 275 // array of arrays) in the worklist. 276 SmallVector<WeakVH, 8> WorkList(V->use_begin(), V->use_end()); 277 while (!WorkList.empty()) { 278 Value *UV = WorkList.pop_back_val(); 279 if (!UV) 280 continue; 281 282 User *U = cast<User>(UV); 283 284 if (LoadInst *LI = dyn_cast<LoadInst>(U)) { 285 if (Init) { 286 // Replace the load with the initializer. 287 LI->replaceAllUsesWith(Init); 288 LI->eraseFromParent(); 289 Changed = true; 290 } 291 } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 292 // Store must be unreachable or storing Init into the global. 293 SI->eraseFromParent(); 294 Changed = true; 295 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) { 296 if (CE->getOpcode() == Instruction::GetElementPtr) { 297 Constant *SubInit = 0; 298 if (Init) 299 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 300 Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI); 301 } else if ((CE->getOpcode() == Instruction::BitCast && 302 CE->getType()->isPointerTy()) || 303 CE->getOpcode() == Instruction::AddrSpaceCast) { 304 // Pointer cast, delete any stores and memsets to the global. 305 Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI); 306 } 307 308 if (CE->use_empty()) { 309 CE->destroyConstant(); 310 Changed = true; 311 } 312 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) { 313 // Do not transform "gepinst (gep constexpr (GV))" here, because forming 314 // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold 315 // and will invalidate our notion of what Init is. 316 Constant *SubInit = 0; 317 if (!isa<ConstantExpr>(GEP->getOperand(0))) { 318 ConstantExpr *CE = 319 dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI)); 320 if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr) 321 SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE); 322 323 // If the initializer is an all-null value and we have an inbounds GEP, 324 // we already know what the result of any load from that GEP is. 325 // TODO: Handle splats. 326 if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds()) 327 SubInit = Constant::getNullValue(GEP->getType()->getElementType()); 328 } 329 Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI); 330 331 if (GEP->use_empty()) { 332 GEP->eraseFromParent(); 333 Changed = true; 334 } 335 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv 336 if (MI->getRawDest() == V) { 337 MI->eraseFromParent(); 338 Changed = true; 339 } 340 341 } else if (Constant *C = dyn_cast<Constant>(U)) { 342 // If we have a chain of dead constantexprs or other things dangling from 343 // us, and if they are all dead, nuke them without remorse. 344 if (isSafeToDestroyConstant(C)) { 345 C->destroyConstant(); 346 CleanupConstantGlobalUsers(V, Init, DL, TLI); 347 return true; 348 } 349 } 350 } 351 return Changed; 352 } 353 354 /// isSafeSROAElementUse - Return true if the specified instruction is a safe 355 /// user of a derived expression from a global that we want to SROA. 356 static bool isSafeSROAElementUse(Value *V) { 357 // We might have a dead and dangling constant hanging off of here. 358 if (Constant *C = dyn_cast<Constant>(V)) 359 return isSafeToDestroyConstant(C); 360 361 Instruction *I = dyn_cast<Instruction>(V); 362 if (!I) return false; 363 364 // Loads are ok. 365 if (isa<LoadInst>(I)) return true; 366 367 // Stores *to* the pointer are ok. 368 if (StoreInst *SI = dyn_cast<StoreInst>(I)) 369 return SI->getOperand(0) != V; 370 371 // Otherwise, it must be a GEP. 372 GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I); 373 if (GEPI == 0) return false; 374 375 if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) || 376 !cast<Constant>(GEPI->getOperand(1))->isNullValue()) 377 return false; 378 379 for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end(); 380 I != E; ++I) 381 if (!isSafeSROAElementUse(*I)) 382 return false; 383 return true; 384 } 385 386 387 /// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value. 388 /// Look at it and its uses and decide whether it is safe to SROA this global. 389 /// 390 static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) { 391 // The user of the global must be a GEP Inst or a ConstantExpr GEP. 392 if (!isa<GetElementPtrInst>(U) && 393 (!isa<ConstantExpr>(U) || 394 cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr)) 395 return false; 396 397 // Check to see if this ConstantExpr GEP is SRA'able. In particular, we 398 // don't like < 3 operand CE's, and we don't like non-constant integer 399 // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some 400 // value of C. 401 if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) || 402 !cast<Constant>(U->getOperand(1))->isNullValue() || 403 !isa<ConstantInt>(U->getOperand(2))) 404 return false; 405 406 gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U); 407 ++GEPI; // Skip over the pointer index. 408 409 // If this is a use of an array allocation, do a bit more checking for sanity. 410 if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) { 411 uint64_t NumElements = AT->getNumElements(); 412 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2)); 413 414 // Check to make sure that index falls within the array. If not, 415 // something funny is going on, so we won't do the optimization. 416 // 417 if (Idx->getZExtValue() >= NumElements) 418 return false; 419 420 // We cannot scalar repl this level of the array unless any array 421 // sub-indices are in-range constants. In particular, consider: 422 // A[0][i]. We cannot know that the user isn't doing invalid things like 423 // allowing i to index an out-of-range subscript that accesses A[1]. 424 // 425 // Scalar replacing *just* the outer index of the array is probably not 426 // going to be a win anyway, so just give up. 427 for (++GEPI; // Skip array index. 428 GEPI != E; 429 ++GEPI) { 430 uint64_t NumElements; 431 if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI)) 432 NumElements = SubArrayTy->getNumElements(); 433 else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI)) 434 NumElements = SubVectorTy->getNumElements(); 435 else { 436 assert((*GEPI)->isStructTy() && 437 "Indexed GEP type is not array, vector, or struct!"); 438 continue; 439 } 440 441 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand()); 442 if (!IdxVal || IdxVal->getZExtValue() >= NumElements) 443 return false; 444 } 445 } 446 447 for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I) 448 if (!isSafeSROAElementUse(*I)) 449 return false; 450 return true; 451 } 452 453 /// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it 454 /// is safe for us to perform this transformation. 455 /// 456 static bool GlobalUsersSafeToSRA(GlobalValue *GV) { 457 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); 458 UI != E; ++UI) { 459 if (!IsUserOfGlobalSafeForSRA(*UI, GV)) 460 return false; 461 } 462 return true; 463 } 464 465 466 /// SRAGlobal - Perform scalar replacement of aggregates on the specified global 467 /// variable. This opens the door for other optimizations by exposing the 468 /// behavior of the program in a more fine-grained way. We have determined that 469 /// this transformation is safe already. We return the first global variable we 470 /// insert so that the caller can reprocess it. 471 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) { 472 // Make sure this global only has simple uses that we can SRA. 473 if (!GlobalUsersSafeToSRA(GV)) 474 return 0; 475 476 assert(GV->hasLocalLinkage() && !GV->isConstant()); 477 Constant *Init = GV->getInitializer(); 478 Type *Ty = Init->getType(); 479 480 std::vector<GlobalVariable*> NewGlobals; 481 Module::GlobalListType &Globals = GV->getParent()->getGlobalList(); 482 483 // Get the alignment of the global, either explicit or target-specific. 484 unsigned StartAlignment = GV->getAlignment(); 485 if (StartAlignment == 0) 486 StartAlignment = DL.getABITypeAlignment(GV->getType()); 487 488 if (StructType *STy = dyn_cast<StructType>(Ty)) { 489 NewGlobals.reserve(STy->getNumElements()); 490 const StructLayout &Layout = *DL.getStructLayout(STy); 491 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 492 Constant *In = Init->getAggregateElement(i); 493 assert(In && "Couldn't get element of initializer?"); 494 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false, 495 GlobalVariable::InternalLinkage, 496 In, GV->getName()+"."+Twine(i), 497 GV->getThreadLocalMode(), 498 GV->getType()->getAddressSpace()); 499 Globals.insert(GV, NGV); 500 NewGlobals.push_back(NGV); 501 502 // Calculate the known alignment of the field. If the original aggregate 503 // had 256 byte alignment for example, something might depend on that: 504 // propagate info to each field. 505 uint64_t FieldOffset = Layout.getElementOffset(i); 506 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset); 507 if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i))) 508 NGV->setAlignment(NewAlign); 509 } 510 } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) { 511 unsigned NumElements = 0; 512 if (ArrayType *ATy = dyn_cast<ArrayType>(STy)) 513 NumElements = ATy->getNumElements(); 514 else 515 NumElements = cast<VectorType>(STy)->getNumElements(); 516 517 if (NumElements > 16 && GV->hasNUsesOrMore(16)) 518 return 0; // It's not worth it. 519 NewGlobals.reserve(NumElements); 520 521 uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType()); 522 unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType()); 523 for (unsigned i = 0, e = NumElements; i != e; ++i) { 524 Constant *In = Init->getAggregateElement(i); 525 assert(In && "Couldn't get element of initializer?"); 526 527 GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false, 528 GlobalVariable::InternalLinkage, 529 In, GV->getName()+"."+Twine(i), 530 GV->getThreadLocalMode(), 531 GV->getType()->getAddressSpace()); 532 Globals.insert(GV, NGV); 533 NewGlobals.push_back(NGV); 534 535 // Calculate the known alignment of the field. If the original aggregate 536 // had 256 byte alignment for example, something might depend on that: 537 // propagate info to each field. 538 unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i); 539 if (NewAlign > EltAlign) 540 NGV->setAlignment(NewAlign); 541 } 542 } 543 544 if (NewGlobals.empty()) 545 return 0; 546 547 DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV); 548 549 Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext())); 550 551 // Loop over all of the uses of the global, replacing the constantexpr geps, 552 // with smaller constantexpr geps or direct references. 553 while (!GV->use_empty()) { 554 User *GEP = GV->use_back(); 555 assert(((isa<ConstantExpr>(GEP) && 556 cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)|| 557 isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!"); 558 559 // Ignore the 1th operand, which has to be zero or else the program is quite 560 // broken (undefined). Get the 2nd operand, which is the structure or array 561 // index. 562 unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue(); 563 if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access. 564 565 Value *NewPtr = NewGlobals[Val]; 566 567 // Form a shorter GEP if needed. 568 if (GEP->getNumOperands() > 3) { 569 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) { 570 SmallVector<Constant*, 8> Idxs; 571 Idxs.push_back(NullInt); 572 for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i) 573 Idxs.push_back(CE->getOperand(i)); 574 NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs); 575 } else { 576 GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP); 577 SmallVector<Value*, 8> Idxs; 578 Idxs.push_back(NullInt); 579 for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) 580 Idxs.push_back(GEPI->getOperand(i)); 581 NewPtr = GetElementPtrInst::Create(NewPtr, Idxs, 582 GEPI->getName()+"."+Twine(Val),GEPI); 583 } 584 } 585 GEP->replaceAllUsesWith(NewPtr); 586 587 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP)) 588 GEPI->eraseFromParent(); 589 else 590 cast<ConstantExpr>(GEP)->destroyConstant(); 591 } 592 593 // Delete the old global, now that it is dead. 594 Globals.erase(GV); 595 ++NumSRA; 596 597 // Loop over the new globals array deleting any globals that are obviously 598 // dead. This can arise due to scalarization of a structure or an array that 599 // has elements that are dead. 600 unsigned FirstGlobal = 0; 601 for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i) 602 if (NewGlobals[i]->use_empty()) { 603 Globals.erase(NewGlobals[i]); 604 if (FirstGlobal == i) ++FirstGlobal; 605 } 606 607 return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0; 608 } 609 610 /// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified 611 /// value will trap if the value is dynamically null. PHIs keeps track of any 612 /// phi nodes we've seen to avoid reprocessing them. 613 static bool AllUsesOfValueWillTrapIfNull(const Value *V, 614 SmallPtrSet<const PHINode*, 8> &PHIs) { 615 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; 616 ++UI) { 617 const User *U = *UI; 618 619 if (isa<LoadInst>(U)) { 620 // Will trap. 621 } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) { 622 if (SI->getOperand(0) == V) { 623 //cerr << "NONTRAPPING USE: " << *U; 624 return false; // Storing the value. 625 } 626 } else if (const CallInst *CI = dyn_cast<CallInst>(U)) { 627 if (CI->getCalledValue() != V) { 628 //cerr << "NONTRAPPING USE: " << *U; 629 return false; // Not calling the ptr 630 } 631 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) { 632 if (II->getCalledValue() != V) { 633 //cerr << "NONTRAPPING USE: " << *U; 634 return false; // Not calling the ptr 635 } 636 } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) { 637 if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false; 638 } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 639 if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false; 640 } else if (const PHINode *PN = dyn_cast<PHINode>(U)) { 641 // If we've already seen this phi node, ignore it, it has already been 642 // checked. 643 if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs)) 644 return false; 645 } else if (isa<ICmpInst>(U) && 646 isa<ConstantPointerNull>(UI->getOperand(1))) { 647 // Ignore icmp X, null 648 } else { 649 //cerr << "NONTRAPPING USE: " << *U; 650 return false; 651 } 652 } 653 return true; 654 } 655 656 /// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads 657 /// from GV will trap if the loaded value is null. Note that this also permits 658 /// comparisons of the loaded value against null, as a special case. 659 static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) { 660 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end(); 661 UI != E; ++UI) { 662 const User *U = *UI; 663 664 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) { 665 SmallPtrSet<const PHINode*, 8> PHIs; 666 if (!AllUsesOfValueWillTrapIfNull(LI, PHIs)) 667 return false; 668 } else if (isa<StoreInst>(U)) { 669 // Ignore stores to the global. 670 } else { 671 // We don't know or understand this user, bail out. 672 //cerr << "UNKNOWN USER OF GLOBAL!: " << *U; 673 return false; 674 } 675 } 676 return true; 677 } 678 679 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { 680 bool Changed = false; 681 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) { 682 Instruction *I = cast<Instruction>(*UI++); 683 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 684 LI->setOperand(0, NewV); 685 Changed = true; 686 } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) { 687 if (SI->getOperand(1) == V) { 688 SI->setOperand(1, NewV); 689 Changed = true; 690 } 691 } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) { 692 CallSite CS(I); 693 if (CS.getCalledValue() == V) { 694 // Calling through the pointer! Turn into a direct call, but be careful 695 // that the pointer is not also being passed as an argument. 696 CS.setCalledFunction(NewV); 697 Changed = true; 698 bool PassedAsArg = false; 699 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i) 700 if (CS.getArgument(i) == V) { 701 PassedAsArg = true; 702 CS.setArgument(i, NewV); 703 } 704 705 if (PassedAsArg) { 706 // Being passed as an argument also. Be careful to not invalidate UI! 707 UI = V->use_begin(); 708 } 709 } 710 } else if (CastInst *CI = dyn_cast<CastInst>(I)) { 711 Changed |= OptimizeAwayTrappingUsesOfValue(CI, 712 ConstantExpr::getCast(CI->getOpcode(), 713 NewV, CI->getType())); 714 if (CI->use_empty()) { 715 Changed = true; 716 CI->eraseFromParent(); 717 } 718 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { 719 // Should handle GEP here. 720 SmallVector<Constant*, 8> Idxs; 721 Idxs.reserve(GEPI->getNumOperands()-1); 722 for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end(); 723 i != e; ++i) 724 if (Constant *C = dyn_cast<Constant>(*i)) 725 Idxs.push_back(C); 726 else 727 break; 728 if (Idxs.size() == GEPI->getNumOperands()-1) 729 Changed |= OptimizeAwayTrappingUsesOfValue(GEPI, 730 ConstantExpr::getGetElementPtr(NewV, Idxs)); 731 if (GEPI->use_empty()) { 732 Changed = true; 733 GEPI->eraseFromParent(); 734 } 735 } 736 } 737 738 return Changed; 739 } 740 741 742 /// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null 743 /// value stored into it. If there are uses of the loaded value that would trap 744 /// if the loaded value is dynamically null, then we know that they cannot be 745 /// reachable with a null optimize away the load. 746 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, 747 const DataLayout *DL, 748 TargetLibraryInfo *TLI) { 749 bool Changed = false; 750 751 // Keep track of whether we are able to remove all the uses of the global 752 // other than the store that defines it. 753 bool AllNonStoreUsesGone = true; 754 755 // Replace all uses of loads with uses of uses of the stored value. 756 for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){ 757 User *GlobalUser = *GUI++; 758 if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) { 759 Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV); 760 // If we were able to delete all uses of the loads 761 if (LI->use_empty()) { 762 LI->eraseFromParent(); 763 Changed = true; 764 } else { 765 AllNonStoreUsesGone = false; 766 } 767 } else if (isa<StoreInst>(GlobalUser)) { 768 // Ignore the store that stores "LV" to the global. 769 assert(GlobalUser->getOperand(1) == GV && 770 "Must be storing *to* the global"); 771 } else { 772 AllNonStoreUsesGone = false; 773 774 // If we get here we could have other crazy uses that are transitively 775 // loaded. 776 assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) || 777 isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) || 778 isa<BitCastInst>(GlobalUser) || 779 isa<GetElementPtrInst>(GlobalUser)) && 780 "Only expect load and stores!"); 781 } 782 } 783 784 if (Changed) { 785 DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV); 786 ++NumGlobUses; 787 } 788 789 // If we nuked all of the loads, then none of the stores are needed either, 790 // nor is the global. 791 if (AllNonStoreUsesGone) { 792 if (isLeakCheckerRoot(GV)) { 793 Changed |= CleanupPointerRootUsers(GV, TLI); 794 } else { 795 Changed = true; 796 CleanupConstantGlobalUsers(GV, 0, DL, TLI); 797 } 798 if (GV->use_empty()) { 799 DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n"); 800 Changed = true; 801 GV->eraseFromParent(); 802 ++NumDeleted; 803 } 804 } 805 return Changed; 806 } 807 808 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the 809 /// instructions that are foldable. 810 static void ConstantPropUsersOf(Value *V, const DataLayout *DL, 811 TargetLibraryInfo *TLI) { 812 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) 813 if (Instruction *I = dyn_cast<Instruction>(*UI++)) 814 if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) { 815 I->replaceAllUsesWith(NewC); 816 817 // Advance UI to the next non-I use to avoid invalidating it! 818 // Instructions could multiply use V. 819 while (UI != E && *UI == I) 820 ++UI; 821 I->eraseFromParent(); 822 } 823 } 824 825 /// OptimizeGlobalAddressOfMalloc - This function takes the specified global 826 /// variable, and transforms the program as if it always contained the result of 827 /// the specified malloc. Because it is always the result of the specified 828 /// malloc, there is no reason to actually DO the malloc. Instead, turn the 829 /// malloc into a global, and any loads of GV as uses of the new global. 830 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, 831 CallInst *CI, 832 Type *AllocTy, 833 ConstantInt *NElements, 834 const DataLayout *DL, 835 TargetLibraryInfo *TLI) { 836 DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); 837 838 Type *GlobalType; 839 if (NElements->getZExtValue() == 1) 840 GlobalType = AllocTy; 841 else 842 // If we have an array allocation, the global variable is of an array. 843 GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue()); 844 845 // Create the new global variable. The contents of the malloc'd memory is 846 // undefined, so initialize with an undef value. 847 GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(), 848 GlobalType, false, 849 GlobalValue::InternalLinkage, 850 UndefValue::get(GlobalType), 851 GV->getName()+".body", 852 GV, 853 GV->getThreadLocalMode()); 854 855 // If there are bitcast users of the malloc (which is typical, usually we have 856 // a malloc + bitcast) then replace them with uses of the new global. Update 857 // other users to use the global as well. 858 BitCastInst *TheBC = 0; 859 while (!CI->use_empty()) { 860 Instruction *User = cast<Instruction>(CI->use_back()); 861 if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) { 862 if (BCI->getType() == NewGV->getType()) { 863 BCI->replaceAllUsesWith(NewGV); 864 BCI->eraseFromParent(); 865 } else { 866 BCI->setOperand(0, NewGV); 867 } 868 } else { 869 if (TheBC == 0) 870 TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI); 871 User->replaceUsesOfWith(CI, TheBC); 872 } 873 } 874 875 Constant *RepValue = NewGV; 876 if (NewGV->getType() != GV->getType()->getElementType()) 877 RepValue = ConstantExpr::getBitCast(RepValue, 878 GV->getType()->getElementType()); 879 880 // If there is a comparison against null, we will insert a global bool to 881 // keep track of whether the global was initialized yet or not. 882 GlobalVariable *InitBool = 883 new GlobalVariable(Type::getInt1Ty(GV->getContext()), false, 884 GlobalValue::InternalLinkage, 885 ConstantInt::getFalse(GV->getContext()), 886 GV->getName()+".init", GV->getThreadLocalMode()); 887 bool InitBoolUsed = false; 888 889 // Loop over all uses of GV, processing them in turn. 890 while (!GV->use_empty()) { 891 if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) { 892 // The global is initialized when the store to it occurs. 893 new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0, 894 SI->getOrdering(), SI->getSynchScope(), SI); 895 SI->eraseFromParent(); 896 continue; 897 } 898 899 LoadInst *LI = cast<LoadInst>(GV->use_back()); 900 while (!LI->use_empty()) { 901 Use &LoadUse = LI->use_begin().getUse(); 902 if (!isa<ICmpInst>(LoadUse.getUser())) { 903 LoadUse = RepValue; 904 continue; 905 } 906 907 ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser()); 908 // Replace the cmp X, 0 with a use of the bool value. 909 // Sink the load to where the compare was, if atomic rules allow us to. 910 Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0, 911 LI->getOrdering(), LI->getSynchScope(), 912 LI->isUnordered() ? (Instruction*)ICI : LI); 913 InitBoolUsed = true; 914 switch (ICI->getPredicate()) { 915 default: llvm_unreachable("Unknown ICmp Predicate!"); 916 case ICmpInst::ICMP_ULT: 917 case ICmpInst::ICMP_SLT: // X < null -> always false 918 LV = ConstantInt::getFalse(GV->getContext()); 919 break; 920 case ICmpInst::ICMP_ULE: 921 case ICmpInst::ICMP_SLE: 922 case ICmpInst::ICMP_EQ: 923 LV = BinaryOperator::CreateNot(LV, "notinit", ICI); 924 break; 925 case ICmpInst::ICMP_NE: 926 case ICmpInst::ICMP_UGE: 927 case ICmpInst::ICMP_SGE: 928 case ICmpInst::ICMP_UGT: 929 case ICmpInst::ICMP_SGT: 930 break; // no change. 931 } 932 ICI->replaceAllUsesWith(LV); 933 ICI->eraseFromParent(); 934 } 935 LI->eraseFromParent(); 936 } 937 938 // If the initialization boolean was used, insert it, otherwise delete it. 939 if (!InitBoolUsed) { 940 while (!InitBool->use_empty()) // Delete initializations 941 cast<StoreInst>(InitBool->use_back())->eraseFromParent(); 942 delete InitBool; 943 } else 944 GV->getParent()->getGlobalList().insert(GV, InitBool); 945 946 // Now the GV is dead, nuke it and the malloc.. 947 GV->eraseFromParent(); 948 CI->eraseFromParent(); 949 950 // To further other optimizations, loop over all users of NewGV and try to 951 // constant prop them. This will promote GEP instructions with constant 952 // indices into GEP constant-exprs, which will allow global-opt to hack on it. 953 ConstantPropUsersOf(NewGV, DL, TLI); 954 if (RepValue != NewGV) 955 ConstantPropUsersOf(RepValue, DL, TLI); 956 957 return NewGV; 958 } 959 960 /// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking 961 /// to make sure that there are no complex uses of V. We permit simple things 962 /// like dereferencing the pointer, but not storing through the address, unless 963 /// it is to the specified global. 964 static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V, 965 const GlobalVariable *GV, 966 SmallPtrSet<const PHINode*, 8> &PHIs) { 967 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); 968 UI != E; ++UI) { 969 const Instruction *Inst = cast<Instruction>(*UI); 970 971 if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) { 972 continue; // Fine, ignore. 973 } 974 975 if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 976 if (SI->getOperand(0) == V && SI->getOperand(1) != GV) 977 return false; // Storing the pointer itself... bad. 978 continue; // Otherwise, storing through it, or storing into GV... fine. 979 } 980 981 // Must index into the array and into the struct. 982 if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) { 983 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs)) 984 return false; 985 continue; 986 } 987 988 if (const PHINode *PN = dyn_cast<PHINode>(Inst)) { 989 // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI 990 // cycles. 991 if (PHIs.insert(PN)) 992 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs)) 993 return false; 994 continue; 995 } 996 997 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) { 998 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs)) 999 return false; 1000 continue; 1001 } 1002 1003 return false; 1004 } 1005 return true; 1006 } 1007 1008 /// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV 1009 /// somewhere. Transform all uses of the allocation into loads from the 1010 /// global and uses of the resultant pointer. Further, delete the store into 1011 /// GV. This assumes that these value pass the 1012 /// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate. 1013 static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc, 1014 GlobalVariable *GV) { 1015 while (!Alloc->use_empty()) { 1016 Instruction *U = cast<Instruction>(*Alloc->use_begin()); 1017 Instruction *InsertPt = U; 1018 if (StoreInst *SI = dyn_cast<StoreInst>(U)) { 1019 // If this is the store of the allocation into the global, remove it. 1020 if (SI->getOperand(1) == GV) { 1021 SI->eraseFromParent(); 1022 continue; 1023 } 1024 } else if (PHINode *PN = dyn_cast<PHINode>(U)) { 1025 // Insert the load in the corresponding predecessor, not right before the 1026 // PHI. 1027 InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator(); 1028 } else if (isa<BitCastInst>(U)) { 1029 // Must be bitcast between the malloc and store to initialize the global. 1030 ReplaceUsesOfMallocWithGlobal(U, GV); 1031 U->eraseFromParent(); 1032 continue; 1033 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) { 1034 // If this is a "GEP bitcast" and the user is a store to the global, then 1035 // just process it as a bitcast. 1036 if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse()) 1037 if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back())) 1038 if (SI->getOperand(1) == GV) { 1039 // Must be bitcast GEP between the malloc and store to initialize 1040 // the global. 1041 ReplaceUsesOfMallocWithGlobal(GEPI, GV); 1042 GEPI->eraseFromParent(); 1043 continue; 1044 } 1045 } 1046 1047 // Insert a load from the global, and use it instead of the malloc. 1048 Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt); 1049 U->replaceUsesOfWith(Alloc, NL); 1050 } 1051 } 1052 1053 /// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi 1054 /// of a load) are simple enough to perform heap SRA on. This permits GEP's 1055 /// that index through the array and struct field, icmps of null, and PHIs. 1056 static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V, 1057 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs, 1058 SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) { 1059 // We permit two users of the load: setcc comparing against the null 1060 // pointer, and a getelementptr of a specific form. 1061 for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; 1062 ++UI) { 1063 const Instruction *User = cast<Instruction>(*UI); 1064 1065 // Comparison against null is ok. 1066 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) { 1067 if (!isa<ConstantPointerNull>(ICI->getOperand(1))) 1068 return false; 1069 continue; 1070 } 1071 1072 // getelementptr is also ok, but only a simple form. 1073 if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { 1074 // Must index into the array and into the struct. 1075 if (GEPI->getNumOperands() < 3) 1076 return false; 1077 1078 // Otherwise the GEP is ok. 1079 continue; 1080 } 1081 1082 if (const PHINode *PN = dyn_cast<PHINode>(User)) { 1083 if (!LoadUsingPHIsPerLoad.insert(PN)) 1084 // This means some phi nodes are dependent on each other. 1085 // Avoid infinite looping! 1086 return false; 1087 if (!LoadUsingPHIs.insert(PN)) 1088 // If we have already analyzed this PHI, then it is safe. 1089 continue; 1090 1091 // Make sure all uses of the PHI are simple enough to transform. 1092 if (!LoadUsesSimpleEnoughForHeapSRA(PN, 1093 LoadUsingPHIs, LoadUsingPHIsPerLoad)) 1094 return false; 1095 1096 continue; 1097 } 1098 1099 // Otherwise we don't know what this is, not ok. 1100 return false; 1101 } 1102 1103 return true; 1104 } 1105 1106 1107 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from 1108 /// GV are simple enough to perform HeapSRA, return true. 1109 static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV, 1110 Instruction *StoredVal) { 1111 SmallPtrSet<const PHINode*, 32> LoadUsingPHIs; 1112 SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad; 1113 for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end(); 1114 UI != E; ++UI) 1115 if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) { 1116 if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs, 1117 LoadUsingPHIsPerLoad)) 1118 return false; 1119 LoadUsingPHIsPerLoad.clear(); 1120 } 1121 1122 // If we reach here, we know that all uses of the loads and transitive uses 1123 // (through PHI nodes) are simple enough to transform. However, we don't know 1124 // that all inputs the to the PHI nodes are in the same equivalence sets. 1125 // Check to verify that all operands of the PHIs are either PHIS that can be 1126 // transformed, loads from GV, or MI itself. 1127 for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin() 1128 , E = LoadUsingPHIs.end(); I != E; ++I) { 1129 const PHINode *PN = *I; 1130 for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) { 1131 Value *InVal = PN->getIncomingValue(op); 1132 1133 // PHI of the stored value itself is ok. 1134 if (InVal == StoredVal) continue; 1135 1136 if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) { 1137 // One of the PHIs in our set is (optimistically) ok. 1138 if (LoadUsingPHIs.count(InPN)) 1139 continue; 1140 return false; 1141 } 1142 1143 // Load from GV is ok. 1144 if (const LoadInst *LI = dyn_cast<LoadInst>(InVal)) 1145 if (LI->getOperand(0) == GV) 1146 continue; 1147 1148 // UNDEF? NULL? 1149 1150 // Anything else is rejected. 1151 return false; 1152 } 1153 } 1154 1155 return true; 1156 } 1157 1158 static Value *GetHeapSROAValue(Value *V, unsigned FieldNo, 1159 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1160 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1161 std::vector<Value*> &FieldVals = InsertedScalarizedValues[V]; 1162 1163 if (FieldNo >= FieldVals.size()) 1164 FieldVals.resize(FieldNo+1); 1165 1166 // If we already have this value, just reuse the previously scalarized 1167 // version. 1168 if (Value *FieldVal = FieldVals[FieldNo]) 1169 return FieldVal; 1170 1171 // Depending on what instruction this is, we have several cases. 1172 Value *Result; 1173 if (LoadInst *LI = dyn_cast<LoadInst>(V)) { 1174 // This is a scalarized version of the load from the global. Just create 1175 // a new Load of the scalarized global. 1176 Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo, 1177 InsertedScalarizedValues, 1178 PHIsToRewrite), 1179 LI->getName()+".f"+Twine(FieldNo), LI); 1180 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 1181 // PN's type is pointer to struct. Make a new PHI of pointer to struct 1182 // field. 1183 StructType *ST = cast<StructType>(PN->getType()->getPointerElementType()); 1184 1185 PHINode *NewPN = 1186 PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)), 1187 PN->getNumIncomingValues(), 1188 PN->getName()+".f"+Twine(FieldNo), PN); 1189 Result = NewPN; 1190 PHIsToRewrite.push_back(std::make_pair(PN, FieldNo)); 1191 } else { 1192 llvm_unreachable("Unknown usable value"); 1193 } 1194 1195 return FieldVals[FieldNo] = Result; 1196 } 1197 1198 /// RewriteHeapSROALoadUser - Given a load instruction and a value derived from 1199 /// the load, rewrite the derived value to use the HeapSRoA'd load. 1200 static void RewriteHeapSROALoadUser(Instruction *LoadUser, 1201 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1202 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1203 // If this is a comparison against null, handle it. 1204 if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) { 1205 assert(isa<ConstantPointerNull>(SCI->getOperand(1))); 1206 // If we have a setcc of the loaded pointer, we can use a setcc of any 1207 // field. 1208 Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0, 1209 InsertedScalarizedValues, PHIsToRewrite); 1210 1211 Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr, 1212 Constant::getNullValue(NPtr->getType()), 1213 SCI->getName()); 1214 SCI->replaceAllUsesWith(New); 1215 SCI->eraseFromParent(); 1216 return; 1217 } 1218 1219 // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...' 1220 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) { 1221 assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2)) 1222 && "Unexpected GEPI!"); 1223 1224 // Load the pointer for this field. 1225 unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); 1226 Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo, 1227 InsertedScalarizedValues, PHIsToRewrite); 1228 1229 // Create the new GEP idx vector. 1230 SmallVector<Value*, 8> GEPIdx; 1231 GEPIdx.push_back(GEPI->getOperand(1)); 1232 GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end()); 1233 1234 Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx, 1235 GEPI->getName(), GEPI); 1236 GEPI->replaceAllUsesWith(NGEPI); 1237 GEPI->eraseFromParent(); 1238 return; 1239 } 1240 1241 // Recursively transform the users of PHI nodes. This will lazily create the 1242 // PHIs that are needed for individual elements. Keep track of what PHIs we 1243 // see in InsertedScalarizedValues so that we don't get infinite loops (very 1244 // antisocial). If the PHI is already in InsertedScalarizedValues, it has 1245 // already been seen first by another load, so its uses have already been 1246 // processed. 1247 PHINode *PN = cast<PHINode>(LoadUser); 1248 if (!InsertedScalarizedValues.insert(std::make_pair(PN, 1249 std::vector<Value*>())).second) 1250 return; 1251 1252 // If this is the first time we've seen this PHI, recursively process all 1253 // users. 1254 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) { 1255 Instruction *User = cast<Instruction>(*UI++); 1256 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1257 } 1258 } 1259 1260 /// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr 1261 /// is a value loaded from the global. Eliminate all uses of Ptr, making them 1262 /// use FieldGlobals instead. All uses of loaded values satisfy 1263 /// AllGlobalLoadUsesSimpleEnoughForHeapSRA. 1264 static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, 1265 DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues, 1266 std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) { 1267 for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end(); 1268 UI != E; ) { 1269 Instruction *User = cast<Instruction>(*UI++); 1270 RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite); 1271 } 1272 1273 if (Load->use_empty()) { 1274 Load->eraseFromParent(); 1275 InsertedScalarizedValues.erase(Load); 1276 } 1277 } 1278 1279 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break 1280 /// it up into multiple allocations of arrays of the fields. 1281 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, 1282 Value *NElems, const DataLayout *DL, 1283 const TargetLibraryInfo *TLI) { 1284 DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); 1285 Type *MAT = getMallocAllocatedType(CI, TLI); 1286 StructType *STy = cast<StructType>(MAT); 1287 1288 // There is guaranteed to be at least one use of the malloc (storing 1289 // it into GV). If there are other uses, change them to be uses of 1290 // the global to simplify later code. This also deletes the store 1291 // into GV. 1292 ReplaceUsesOfMallocWithGlobal(CI, GV); 1293 1294 // Okay, at this point, there are no users of the malloc. Insert N 1295 // new mallocs at the same place as CI, and N globals. 1296 std::vector<Value*> FieldGlobals; 1297 std::vector<Value*> FieldMallocs; 1298 1299 for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){ 1300 Type *FieldTy = STy->getElementType(FieldNo); 1301 PointerType *PFieldTy = PointerType::getUnqual(FieldTy); 1302 1303 GlobalVariable *NGV = 1304 new GlobalVariable(*GV->getParent(), 1305 PFieldTy, false, GlobalValue::InternalLinkage, 1306 Constant::getNullValue(PFieldTy), 1307 GV->getName() + ".f" + Twine(FieldNo), GV, 1308 GV->getThreadLocalMode()); 1309 FieldGlobals.push_back(NGV); 1310 1311 unsigned TypeSize = DL->getTypeAllocSize(FieldTy); 1312 if (StructType *ST = dyn_cast<StructType>(FieldTy)) 1313 TypeSize = DL->getStructLayout(ST)->getSizeInBytes(); 1314 Type *IntPtrTy = DL->getIntPtrType(CI->getType()); 1315 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy, 1316 ConstantInt::get(IntPtrTy, TypeSize), 1317 NElems, 0, 1318 CI->getName() + ".f" + Twine(FieldNo)); 1319 FieldMallocs.push_back(NMI); 1320 new StoreInst(NMI, NGV, CI); 1321 } 1322 1323 // The tricky aspect of this transformation is handling the case when malloc 1324 // fails. In the original code, malloc failing would set the result pointer 1325 // of malloc to null. In this case, some mallocs could succeed and others 1326 // could fail. As such, we emit code that looks like this: 1327 // F0 = malloc(field0) 1328 // F1 = malloc(field1) 1329 // F2 = malloc(field2) 1330 // if (F0 == 0 || F1 == 0 || F2 == 0) { 1331 // if (F0) { free(F0); F0 = 0; } 1332 // if (F1) { free(F1); F1 = 0; } 1333 // if (F2) { free(F2); F2 = 0; } 1334 // } 1335 // The malloc can also fail if its argument is too large. 1336 Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0); 1337 Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0), 1338 ConstantZero, "isneg"); 1339 for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) { 1340 Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i], 1341 Constant::getNullValue(FieldMallocs[i]->getType()), 1342 "isnull"); 1343 RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI); 1344 } 1345 1346 // Split the basic block at the old malloc. 1347 BasicBlock *OrigBB = CI->getParent(); 1348 BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont"); 1349 1350 // Create the block to check the first condition. Put all these blocks at the 1351 // end of the function as they are unlikely to be executed. 1352 BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(), 1353 "malloc_ret_null", 1354 OrigBB->getParent()); 1355 1356 // Remove the uncond branch from OrigBB to ContBB, turning it into a cond 1357 // branch on RunningOr. 1358 OrigBB->getTerminator()->eraseFromParent(); 1359 BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB); 1360 1361 // Within the NullPtrBlock, we need to emit a comparison and branch for each 1362 // pointer, because some may be null while others are not. 1363 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1364 Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock); 1365 Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal, 1366 Constant::getNullValue(GVVal->getType())); 1367 BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it", 1368 OrigBB->getParent()); 1369 BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next", 1370 OrigBB->getParent()); 1371 Instruction *BI = BranchInst::Create(FreeBlock, NextBlock, 1372 Cmp, NullPtrBlock); 1373 1374 // Fill in FreeBlock. 1375 CallInst::CreateFree(GVVal, BI); 1376 new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i], 1377 FreeBlock); 1378 BranchInst::Create(NextBlock, FreeBlock); 1379 1380 NullPtrBlock = NextBlock; 1381 } 1382 1383 BranchInst::Create(ContBB, NullPtrBlock); 1384 1385 // CI is no longer needed, remove it. 1386 CI->eraseFromParent(); 1387 1388 /// InsertedScalarizedLoads - As we process loads, if we can't immediately 1389 /// update all uses of the load, keep track of what scalarized loads are 1390 /// inserted for a given load. 1391 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; 1392 InsertedScalarizedValues[GV] = FieldGlobals; 1393 1394 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; 1395 1396 // Okay, the malloc site is completely handled. All of the uses of GV are now 1397 // loads, and all uses of those loads are simple. Rewrite them to use loads 1398 // of the per-field globals instead. 1399 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) { 1400 Instruction *User = cast<Instruction>(*UI++); 1401 1402 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { 1403 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); 1404 continue; 1405 } 1406 1407 // Must be a store of null. 1408 StoreInst *SI = cast<StoreInst>(User); 1409 assert(isa<ConstantPointerNull>(SI->getOperand(0)) && 1410 "Unexpected heap-sra user!"); 1411 1412 // Insert a store of null into each global. 1413 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { 1414 PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); 1415 Constant *Null = Constant::getNullValue(PT->getElementType()); 1416 new StoreInst(Null, FieldGlobals[i], SI); 1417 } 1418 // Erase the original store. 1419 SI->eraseFromParent(); 1420 } 1421 1422 // While we have PHIs that are interesting to rewrite, do it. 1423 while (!PHIsToRewrite.empty()) { 1424 PHINode *PN = PHIsToRewrite.back().first; 1425 unsigned FieldNo = PHIsToRewrite.back().second; 1426 PHIsToRewrite.pop_back(); 1427 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); 1428 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); 1429 1430 // Add all the incoming values. This can materialize more phis. 1431 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1432 Value *InVal = PN->getIncomingValue(i); 1433 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, 1434 PHIsToRewrite); 1435 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); 1436 } 1437 } 1438 1439 // Drop all inter-phi links and any loads that made it this far. 1440 for (DenseMap<Value*, std::vector<Value*> >::iterator 1441 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1442 I != E; ++I) { 1443 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1444 PN->dropAllReferences(); 1445 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1446 LI->dropAllReferences(); 1447 } 1448 1449 // Delete all the phis and loads now that inter-references are dead. 1450 for (DenseMap<Value*, std::vector<Value*> >::iterator 1451 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); 1452 I != E; ++I) { 1453 if (PHINode *PN = dyn_cast<PHINode>(I->first)) 1454 PN->eraseFromParent(); 1455 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) 1456 LI->eraseFromParent(); 1457 } 1458 1459 // The old global is now dead, remove it. 1460 GV->eraseFromParent(); 1461 1462 ++NumHeapSRA; 1463 return cast<GlobalVariable>(FieldGlobals[0]); 1464 } 1465 1466 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a 1467 /// pointer global variable with a single value stored it that is a malloc or 1468 /// cast of malloc. 1469 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, 1470 CallInst *CI, 1471 Type *AllocTy, 1472 AtomicOrdering Ordering, 1473 Module::global_iterator &GVI, 1474 const DataLayout *DL, 1475 TargetLibraryInfo *TLI) { 1476 if (!DL) 1477 return false; 1478 1479 // If this is a malloc of an abstract type, don't touch it. 1480 if (!AllocTy->isSized()) 1481 return false; 1482 1483 // We can't optimize this global unless all uses of it are *known* to be 1484 // of the malloc value, not of the null initializer value (consider a use 1485 // that compares the global's value against zero to see if the malloc has 1486 // been reached). To do this, we check to see if all uses of the global 1487 // would trap if the global were null: this proves that they must all 1488 // happen after the malloc. 1489 if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) 1490 return false; 1491 1492 // We can't optimize this if the malloc itself is used in a complex way, 1493 // for example, being stored into multiple globals. This allows the 1494 // malloc to be stored into the specified global, loaded icmp'd, and 1495 // GEP'd. These are all things we could transform to using the global 1496 // for. 1497 SmallPtrSet<const PHINode*, 8> PHIs; 1498 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) 1499 return false; 1500 1501 // If we have a global that is only initialized with a fixed size malloc, 1502 // transform the program to use global memory instead of malloc'd memory. 1503 // This eliminates dynamic allocation, avoids an indirection accessing the 1504 // data, and exposes the resultant global to further GlobalOpt. 1505 // We cannot optimize the malloc if we cannot determine malloc array size. 1506 Value *NElems = getMallocArraySize(CI, DL, TLI, true); 1507 if (!NElems) 1508 return false; 1509 1510 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) 1511 // Restrict this transformation to only working on small allocations 1512 // (2048 bytes currently), as we don't want to introduce a 16M global or 1513 // something. 1514 if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) { 1515 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI); 1516 return true; 1517 } 1518 1519 // If the allocation is an array of structures, consider transforming this 1520 // into multiple malloc'd arrays, one for each field. This is basically 1521 // SRoA for malloc'd memory. 1522 1523 if (Ordering != NotAtomic) 1524 return false; 1525 1526 // If this is an allocation of a fixed size array of structs, analyze as a 1527 // variable size array. malloc [100 x struct],1 -> malloc struct, 100 1528 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) 1529 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) 1530 AllocTy = AT->getElementType(); 1531 1532 StructType *AllocSTy = dyn_cast<StructType>(AllocTy); 1533 if (!AllocSTy) 1534 return false; 1535 1536 // This the structure has an unreasonable number of fields, leave it 1537 // alone. 1538 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && 1539 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { 1540 1541 // If this is a fixed size array, transform the Malloc to be an alloc of 1542 // structs. malloc [100 x struct],1 -> malloc struct, 100 1543 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { 1544 Type *IntPtrTy = DL->getIntPtrType(CI->getType()); 1545 unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes(); 1546 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); 1547 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); 1548 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, 1549 AllocSize, NumElements, 1550 0, CI->getName()); 1551 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); 1552 CI->replaceAllUsesWith(Cast); 1553 CI->eraseFromParent(); 1554 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) 1555 CI = cast<CallInst>(BCI->getOperand(0)); 1556 else 1557 CI = cast<CallInst>(Malloc); 1558 } 1559 1560 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), 1561 DL, TLI); 1562 return true; 1563 } 1564 1565 return false; 1566 } 1567 1568 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge 1569 // that only one value (besides its initializer) is ever stored to the global. 1570 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, 1571 AtomicOrdering Ordering, 1572 Module::global_iterator &GVI, 1573 const DataLayout *DL, 1574 TargetLibraryInfo *TLI) { 1575 // Ignore no-op GEPs and bitcasts. 1576 StoredOnceVal = StoredOnceVal->stripPointerCasts(); 1577 1578 // If we are dealing with a pointer global that is initialized to null and 1579 // only has one (non-null) value stored into it, then we can optimize any 1580 // users of the loaded value (often calls and loads) that would trap if the 1581 // value was null. 1582 if (GV->getInitializer()->getType()->isPointerTy() && 1583 GV->getInitializer()->isNullValue()) { 1584 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { 1585 if (GV->getInitializer()->getType() != SOVC->getType()) 1586 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); 1587 1588 // Optimize away any trapping uses of the loaded value. 1589 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI)) 1590 return true; 1591 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { 1592 Type *MallocType = getMallocAllocatedType(CI, TLI); 1593 if (MallocType && 1594 TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, 1595 DL, TLI)) 1596 return true; 1597 } 1598 } 1599 1600 return false; 1601 } 1602 1603 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only 1604 /// two values ever stored into GV are its initializer and OtherVal. See if we 1605 /// can shrink the global into a boolean and select between the two values 1606 /// whenever it is used. This exposes the values to other scalar optimizations. 1607 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { 1608 Type *GVElType = GV->getType()->getElementType(); 1609 1610 // If GVElType is already i1, it is already shrunk. If the type of the GV is 1611 // an FP value, pointer or vector, don't do this optimization because a select 1612 // between them is very expensive and unlikely to lead to later 1613 // simplification. In these cases, we typically end up with "cond ? v1 : v2" 1614 // where v1 and v2 both require constant pool loads, a big loss. 1615 if (GVElType == Type::getInt1Ty(GV->getContext()) || 1616 GVElType->isFloatingPointTy() || 1617 GVElType->isPointerTy() || GVElType->isVectorTy()) 1618 return false; 1619 1620 // Walk the use list of the global seeing if all the uses are load or store. 1621 // If there is anything else, bail out. 1622 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){ 1623 User *U = *I; 1624 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) 1625 return false; 1626 } 1627 1628 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); 1629 1630 // Create the new global, initializing it to false. 1631 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), 1632 false, 1633 GlobalValue::InternalLinkage, 1634 ConstantInt::getFalse(GV->getContext()), 1635 GV->getName()+".b", 1636 GV->getThreadLocalMode(), 1637 GV->getType()->getAddressSpace()); 1638 GV->getParent()->getGlobalList().insert(GV, NewGV); 1639 1640 Constant *InitVal = GV->getInitializer(); 1641 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && 1642 "No reason to shrink to bool!"); 1643 1644 // If initialized to zero and storing one into the global, we can use a cast 1645 // instead of a select to synthesize the desired value. 1646 bool IsOneZero = false; 1647 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) 1648 IsOneZero = InitVal->isNullValue() && CI->isOne(); 1649 1650 while (!GV->use_empty()) { 1651 Instruction *UI = cast<Instruction>(GV->use_back()); 1652 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { 1653 // Change the store into a boolean store. 1654 bool StoringOther = SI->getOperand(0) == OtherVal; 1655 // Only do this if we weren't storing a loaded value. 1656 Value *StoreVal; 1657 if (StoringOther || SI->getOperand(0) == InitVal) { 1658 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), 1659 StoringOther); 1660 } else { 1661 // Otherwise, we are storing a previously loaded copy. To do this, 1662 // change the copy from copying the original value to just copying the 1663 // bool. 1664 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); 1665 1666 // If we've already replaced the input, StoredVal will be a cast or 1667 // select instruction. If not, it will be a load of the original 1668 // global. 1669 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { 1670 assert(LI->getOperand(0) == GV && "Not a copy!"); 1671 // Insert a new load, to preserve the saved value. 1672 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, 1673 LI->getOrdering(), LI->getSynchScope(), LI); 1674 } else { 1675 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && 1676 "This is not a form that we understand!"); 1677 StoreVal = StoredVal->getOperand(0); 1678 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); 1679 } 1680 } 1681 new StoreInst(StoreVal, NewGV, false, 0, 1682 SI->getOrdering(), SI->getSynchScope(), SI); 1683 } else { 1684 // Change the load into a load of bool then a select. 1685 LoadInst *LI = cast<LoadInst>(UI); 1686 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, 1687 LI->getOrdering(), LI->getSynchScope(), LI); 1688 Value *NSI; 1689 if (IsOneZero) 1690 NSI = new ZExtInst(NLI, LI->getType(), "", LI); 1691 else 1692 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); 1693 NSI->takeName(LI); 1694 LI->replaceAllUsesWith(NSI); 1695 } 1696 UI->eraseFromParent(); 1697 } 1698 1699 // Retain the name of the old global variable. People who are debugging their 1700 // programs may expect these variables to be named the same. 1701 NewGV->takeName(GV); 1702 GV->eraseFromParent(); 1703 return true; 1704 } 1705 1706 1707 /// ProcessGlobal - Analyze the specified global variable and optimize it if 1708 /// possible. If we make a change, return true. 1709 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV, 1710 Module::global_iterator &GVI) { 1711 if (!GV->isDiscardableIfUnused()) 1712 return false; 1713 1714 // Do more involved optimizations if the global is internal. 1715 GV->removeDeadConstantUsers(); 1716 1717 if (GV->use_empty()) { 1718 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV); 1719 GV->eraseFromParent(); 1720 ++NumDeleted; 1721 return true; 1722 } 1723 1724 if (!GV->hasLocalLinkage()) 1725 return false; 1726 1727 GlobalStatus GS; 1728 1729 if (GlobalStatus::analyzeGlobal(GV, GS)) 1730 return false; 1731 1732 if (!GS.IsCompared && !GV->hasUnnamedAddr()) { 1733 GV->setUnnamedAddr(true); 1734 NumUnnamed++; 1735 } 1736 1737 if (GV->isConstant() || !GV->hasInitializer()) 1738 return false; 1739 1740 return ProcessInternalGlobal(GV, GVI, GS); 1741 } 1742 1743 /// ProcessInternalGlobal - Analyze the specified global variable and optimize 1744 /// it if possible. If we make a change, return true. 1745 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, 1746 Module::global_iterator &GVI, 1747 const GlobalStatus &GS) { 1748 // If this is a first class global and has only one accessing function 1749 // and this function is main (which we know is not recursive), we replace 1750 // the global with a local alloca in this function. 1751 // 1752 // NOTE: It doesn't make sense to promote non-single-value types since we 1753 // are just replacing static memory to stack memory. 1754 // 1755 // If the global is in different address space, don't bring it to stack. 1756 if (!GS.HasMultipleAccessingFunctions && 1757 GS.AccessingFunction && !GS.HasNonInstructionUser && 1758 GV->getType()->getElementType()->isSingleValueType() && 1759 GS.AccessingFunction->getName() == "main" && 1760 GS.AccessingFunction->hasExternalLinkage() && 1761 GV->getType()->getAddressSpace() == 0) { 1762 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); 1763 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction 1764 ->getEntryBlock().begin()); 1765 Type *ElemTy = GV->getType()->getElementType(); 1766 // FIXME: Pass Global's alignment when globals have alignment 1767 AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI); 1768 if (!isa<UndefValue>(GV->getInitializer())) 1769 new StoreInst(GV->getInitializer(), Alloca, &FirstI); 1770 1771 GV->replaceAllUsesWith(Alloca); 1772 GV->eraseFromParent(); 1773 ++NumLocalized; 1774 return true; 1775 } 1776 1777 // If the global is never loaded (but may be stored to), it is dead. 1778 // Delete it now. 1779 if (!GS.IsLoaded) { 1780 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); 1781 1782 bool Changed; 1783 if (isLeakCheckerRoot(GV)) { 1784 // Delete any constant stores to the global. 1785 Changed = CleanupPointerRootUsers(GV, TLI); 1786 } else { 1787 // Delete any stores we can find to the global. We may not be able to 1788 // make it completely dead though. 1789 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1790 } 1791 1792 // If the global is dead now, delete it. 1793 if (GV->use_empty()) { 1794 GV->eraseFromParent(); 1795 ++NumDeleted; 1796 Changed = true; 1797 } 1798 return Changed; 1799 1800 } else if (GS.StoredType <= GlobalStatus::InitializerStored) { 1801 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); 1802 GV->setConstant(true); 1803 1804 // Clean up any obviously simplifiable users now. 1805 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1806 1807 // If the global is dead now, just nuke it. 1808 if (GV->use_empty()) { 1809 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " 1810 << "all users and delete global!\n"); 1811 GV->eraseFromParent(); 1812 ++NumDeleted; 1813 } 1814 1815 ++NumMarked; 1816 return true; 1817 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { 1818 if (DataLayout *DL = getAnalysisIfAvailable<DataLayout>()) 1819 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *DL)) { 1820 GVI = FirstNewGV; // Don't skip the newly produced globals! 1821 return true; 1822 } 1823 } else if (GS.StoredType == GlobalStatus::StoredOnce) { 1824 // If the initial value for the global was an undef value, and if only 1825 // one other value was stored into it, we can just change the 1826 // initializer to be the stored value, then delete all stores to the 1827 // global. This allows us to mark it constant. 1828 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) 1829 if (isa<UndefValue>(GV->getInitializer())) { 1830 // Change the initial value here. 1831 GV->setInitializer(SOVConstant); 1832 1833 // Clean up any obviously simplifiable users now. 1834 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI); 1835 1836 if (GV->use_empty()) { 1837 DEBUG(dbgs() << " *** Substituting initializer allowed us to " 1838 << "simplify all users and delete global!\n"); 1839 GV->eraseFromParent(); 1840 ++NumDeleted; 1841 } else { 1842 GVI = GV; 1843 } 1844 ++NumSubstitute; 1845 return true; 1846 } 1847 1848 // Try to optimize globals based on the knowledge that only one value 1849 // (besides its initializer) is ever stored to the global. 1850 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI, 1851 DL, TLI)) 1852 return true; 1853 1854 // Otherwise, if the global was not a boolean, we can shrink it to be a 1855 // boolean. 1856 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { 1857 if (GS.Ordering == NotAtomic) { 1858 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { 1859 ++NumShrunkToBool; 1860 return true; 1861 } 1862 } 1863 } 1864 } 1865 1866 return false; 1867 } 1868 1869 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified 1870 /// function, changing them to FastCC. 1871 static void ChangeCalleesToFastCall(Function *F) { 1872 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1873 if (isa<BlockAddress>(*UI)) 1874 continue; 1875 CallSite User(cast<Instruction>(*UI)); 1876 User.setCallingConv(CallingConv::Fast); 1877 } 1878 } 1879 1880 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) { 1881 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { 1882 unsigned Index = Attrs.getSlotIndex(i); 1883 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) 1884 continue; 1885 1886 // There can be only one. 1887 return Attrs.removeAttribute(C, Index, Attribute::Nest); 1888 } 1889 1890 return Attrs; 1891 } 1892 1893 static void RemoveNestAttribute(Function *F) { 1894 F->setAttributes(StripNest(F->getContext(), F->getAttributes())); 1895 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ 1896 if (isa<BlockAddress>(*UI)) 1897 continue; 1898 CallSite User(cast<Instruction>(*UI)); 1899 User.setAttributes(StripNest(F->getContext(), User.getAttributes())); 1900 } 1901 } 1902 1903 bool GlobalOpt::OptimizeFunctions(Module &M) { 1904 bool Changed = false; 1905 // Optimize functions. 1906 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { 1907 Function *F = FI++; 1908 // Functions without names cannot be referenced outside this module. 1909 if (!F->hasName() && !F->isDeclaration()) 1910 F->setLinkage(GlobalValue::InternalLinkage); 1911 F->removeDeadConstantUsers(); 1912 if (F->isDefTriviallyDead()) { 1913 F->eraseFromParent(); 1914 Changed = true; 1915 ++NumFnDeleted; 1916 } else if (F->hasLocalLinkage()) { 1917 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() && 1918 !F->hasAddressTaken()) { 1919 // If this function has C calling conventions, is not a varargs 1920 // function, and is only called directly, promote it to use the Fast 1921 // calling convention. 1922 F->setCallingConv(CallingConv::Fast); 1923 ChangeCalleesToFastCall(F); 1924 ++NumFastCallFns; 1925 Changed = true; 1926 } 1927 1928 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && 1929 !F->hasAddressTaken()) { 1930 // The function is not used by a trampoline intrinsic, so it is safe 1931 // to remove the 'nest' attribute. 1932 RemoveNestAttribute(F); 1933 ++NumNestRemoved; 1934 Changed = true; 1935 } 1936 } 1937 } 1938 return Changed; 1939 } 1940 1941 bool GlobalOpt::OptimizeGlobalVars(Module &M) { 1942 bool Changed = false; 1943 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); 1944 GVI != E; ) { 1945 GlobalVariable *GV = GVI++; 1946 // Global variables without names cannot be referenced outside this module. 1947 if (!GV->hasName() && !GV->isDeclaration()) 1948 GV->setLinkage(GlobalValue::InternalLinkage); 1949 // Simplify the initializer. 1950 if (GV->hasInitializer()) 1951 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { 1952 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI); 1953 if (New && New != CE) 1954 GV->setInitializer(New); 1955 } 1956 1957 Changed |= ProcessGlobal(GV, GVI); 1958 } 1959 return Changed; 1960 } 1961 1962 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all 1963 /// initializers have an init priority of 65535. 1964 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { 1965 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); 1966 if (GV == 0) return 0; 1967 1968 // Verify that the initializer is simple enough for us to handle. We are 1969 // only allowed to optimize the initializer if it is unique. 1970 if (!GV->hasUniqueInitializer()) return 0; 1971 1972 if (isa<ConstantAggregateZero>(GV->getInitializer())) 1973 return GV; 1974 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 1975 1976 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 1977 if (isa<ConstantAggregateZero>(*i)) 1978 continue; 1979 ConstantStruct *CS = cast<ConstantStruct>(*i); 1980 if (isa<ConstantPointerNull>(CS->getOperand(1))) 1981 continue; 1982 1983 // Must have a function or null ptr. 1984 if (!isa<Function>(CS->getOperand(1))) 1985 return 0; 1986 1987 // Init priority must be standard. 1988 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0)); 1989 if (CI->getZExtValue() != 65535) 1990 return 0; 1991 } 1992 1993 return GV; 1994 } 1995 1996 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, 1997 /// return a list of the functions and null terminator as a vector. 1998 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { 1999 if (GV->getInitializer()->isNullValue()) 2000 return std::vector<Function*>(); 2001 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); 2002 std::vector<Function*> Result; 2003 Result.reserve(CA->getNumOperands()); 2004 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { 2005 ConstantStruct *CS = cast<ConstantStruct>(*i); 2006 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); 2007 } 2008 return Result; 2009 } 2010 2011 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the 2012 /// specified array, returning the new global to use. 2013 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, 2014 const std::vector<Function*> &Ctors) { 2015 // If we made a change, reassemble the initializer list. 2016 Constant *CSVals[2]; 2017 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535); 2018 CSVals[1] = 0; 2019 2020 StructType *StructTy = 2021 cast<StructType>(GCL->getType()->getElementType()->getArrayElementType()); 2022 2023 // Create the new init list. 2024 std::vector<Constant*> CAList; 2025 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { 2026 if (Ctors[i]) { 2027 CSVals[1] = Ctors[i]; 2028 } else { 2029 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()), 2030 false); 2031 PointerType *PFTy = PointerType::getUnqual(FTy); 2032 CSVals[1] = Constant::getNullValue(PFTy); 2033 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 2034 0x7fffffff); 2035 } 2036 CAList.push_back(ConstantStruct::get(StructTy, CSVals)); 2037 } 2038 2039 // Create the array initializer. 2040 Constant *CA = ConstantArray::get(ArrayType::get(StructTy, 2041 CAList.size()), CAList); 2042 2043 // If we didn't change the number of elements, don't create a new GV. 2044 if (CA->getType() == GCL->getInitializer()->getType()) { 2045 GCL->setInitializer(CA); 2046 return GCL; 2047 } 2048 2049 // Create the new global and insert it next to the existing list. 2050 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(), 2051 GCL->getLinkage(), CA, "", 2052 GCL->getThreadLocalMode()); 2053 GCL->getParent()->getGlobalList().insert(GCL, NGV); 2054 NGV->takeName(GCL); 2055 2056 // Nuke the old list, replacing any uses with the new one. 2057 if (!GCL->use_empty()) { 2058 Constant *V = NGV; 2059 if (V->getType() != GCL->getType()) 2060 V = ConstantExpr::getBitCast(V, GCL->getType()); 2061 GCL->replaceAllUsesWith(V); 2062 } 2063 GCL->eraseFromParent(); 2064 2065 if (Ctors.size()) 2066 return NGV; 2067 else 2068 return 0; 2069 } 2070 2071 2072 static inline bool 2073 isSimpleEnoughValueToCommit(Constant *C, 2074 SmallPtrSet<Constant*, 8> &SimpleConstants, 2075 const DataLayout *DL); 2076 2077 2078 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be 2079 /// handled by the code generator. We don't want to generate something like: 2080 /// void *X = &X/42; 2081 /// because the code generator doesn't have a relocation that can handle that. 2082 /// 2083 /// This function should be called if C was not found (but just got inserted) 2084 /// in SimpleConstants to avoid having to rescan the same constants all the 2085 /// time. 2086 static bool isSimpleEnoughValueToCommitHelper(Constant *C, 2087 SmallPtrSet<Constant*, 8> &SimpleConstants, 2088 const DataLayout *DL) { 2089 // Simple integer, undef, constant aggregate zero, global addresses, etc are 2090 // all supported. 2091 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) || 2092 isa<GlobalValue>(C)) 2093 return true; 2094 2095 // Aggregate values are safe if all their elements are. 2096 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || 2097 isa<ConstantVector>(C)) { 2098 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { 2099 Constant *Op = cast<Constant>(C->getOperand(i)); 2100 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL)) 2101 return false; 2102 } 2103 return true; 2104 } 2105 2106 // We don't know exactly what relocations are allowed in constant expressions, 2107 // so we allow &global+constantoffset, which is safe and uniformly supported 2108 // across targets. 2109 ConstantExpr *CE = cast<ConstantExpr>(C); 2110 switch (CE->getOpcode()) { 2111 case Instruction::BitCast: 2112 // Bitcast is fine if the casted value is fine. 2113 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2114 2115 case Instruction::IntToPtr: 2116 case Instruction::PtrToInt: 2117 // int <=> ptr is fine if the int type is the same size as the 2118 // pointer type. 2119 if (!DL || DL->getTypeSizeInBits(CE->getType()) != 2120 DL->getTypeSizeInBits(CE->getOperand(0)->getType())) 2121 return false; 2122 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2123 2124 // GEP is fine if it is simple + constant offset. 2125 case Instruction::GetElementPtr: 2126 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) 2127 if (!isa<ConstantInt>(CE->getOperand(i))) 2128 return false; 2129 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2130 2131 case Instruction::Add: 2132 // We allow simple+cst. 2133 if (!isa<ConstantInt>(CE->getOperand(1))) 2134 return false; 2135 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL); 2136 } 2137 return false; 2138 } 2139 2140 static inline bool 2141 isSimpleEnoughValueToCommit(Constant *C, 2142 SmallPtrSet<Constant*, 8> &SimpleConstants, 2143 const DataLayout *DL) { 2144 // If we already checked this constant, we win. 2145 if (!SimpleConstants.insert(C)) return true; 2146 // Check the constant. 2147 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL); 2148 } 2149 2150 2151 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple 2152 /// enough for us to understand. In particular, if it is a cast to anything 2153 /// other than from one pointer type to another pointer type, we punt. 2154 /// We basically just support direct accesses to globals and GEP's of 2155 /// globals. This should be kept up to date with CommitValueTo. 2156 static bool isSimpleEnoughPointerToCommit(Constant *C) { 2157 // Conservatively, avoid aggregate types. This is because we don't 2158 // want to worry about them partially overlapping other stores. 2159 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) 2160 return false; 2161 2162 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) 2163 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2164 // external globals. 2165 return GV->hasUniqueInitializer(); 2166 2167 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { 2168 // Handle a constantexpr gep. 2169 if (CE->getOpcode() == Instruction::GetElementPtr && 2170 isa<GlobalVariable>(CE->getOperand(0)) && 2171 cast<GEPOperator>(CE)->isInBounds()) { 2172 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2173 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2174 // external globals. 2175 if (!GV->hasUniqueInitializer()) 2176 return false; 2177 2178 // The first index must be zero. 2179 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin())); 2180 if (!CI || !CI->isZero()) return false; 2181 2182 // The remaining indices must be compile-time known integers within the 2183 // notional bounds of the corresponding static array types. 2184 if (!CE->isGEPWithNoNotionalOverIndexing()) 2185 return false; 2186 2187 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2188 2189 // A constantexpr bitcast from a pointer to another pointer is a no-op, 2190 // and we know how to evaluate it by moving the bitcast from the pointer 2191 // operand to the value operand. 2192 } else if (CE->getOpcode() == Instruction::BitCast && 2193 isa<GlobalVariable>(CE->getOperand(0))) { 2194 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or 2195 // external globals. 2196 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); 2197 } 2198 } 2199 2200 return false; 2201 } 2202 2203 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global 2204 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it. 2205 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. 2206 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, 2207 ConstantExpr *Addr, unsigned OpNo) { 2208 // Base case of the recursion. 2209 if (OpNo == Addr->getNumOperands()) { 2210 assert(Val->getType() == Init->getType() && "Type mismatch!"); 2211 return Val; 2212 } 2213 2214 SmallVector<Constant*, 32> Elts; 2215 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { 2216 // Break up the constant into its elements. 2217 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 2218 Elts.push_back(Init->getAggregateElement(i)); 2219 2220 // Replace the element that we are supposed to. 2221 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); 2222 unsigned Idx = CU->getZExtValue(); 2223 assert(Idx < STy->getNumElements() && "Struct index out of range!"); 2224 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); 2225 2226 // Return the modified struct. 2227 return ConstantStruct::get(STy, Elts); 2228 } 2229 2230 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); 2231 SequentialType *InitTy = cast<SequentialType>(Init->getType()); 2232 2233 uint64_t NumElts; 2234 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) 2235 NumElts = ATy->getNumElements(); 2236 else 2237 NumElts = InitTy->getVectorNumElements(); 2238 2239 // Break up the array into elements. 2240 for (uint64_t i = 0, e = NumElts; i != e; ++i) 2241 Elts.push_back(Init->getAggregateElement(i)); 2242 2243 assert(CI->getZExtValue() < NumElts); 2244 Elts[CI->getZExtValue()] = 2245 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); 2246 2247 if (Init->getType()->isArrayTy()) 2248 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); 2249 return ConstantVector::get(Elts); 2250 } 2251 2252 /// CommitValueTo - We have decided that Addr (which satisfies the predicate 2253 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. 2254 static void CommitValueTo(Constant *Val, Constant *Addr) { 2255 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { 2256 assert(GV->hasInitializer()); 2257 GV->setInitializer(Val); 2258 return; 2259 } 2260 2261 ConstantExpr *CE = cast<ConstantExpr>(Addr); 2262 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2263 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); 2264 } 2265 2266 namespace { 2267 2268 /// Evaluator - This class evaluates LLVM IR, producing the Constant 2269 /// representing each SSA instruction. Changes to global variables are stored 2270 /// in a mapping that can be iterated over after the evaluation is complete. 2271 /// Once an evaluation call fails, the evaluation object should not be reused. 2272 class Evaluator { 2273 public: 2274 Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI) 2275 : DL(DL), TLI(TLI) { 2276 ValueStack.push_back(new DenseMap<Value*, Constant*>); 2277 } 2278 2279 ~Evaluator() { 2280 DeleteContainerPointers(ValueStack); 2281 while (!AllocaTmps.empty()) { 2282 GlobalVariable *Tmp = AllocaTmps.back(); 2283 AllocaTmps.pop_back(); 2284 2285 // If there are still users of the alloca, the program is doing something 2286 // silly, e.g. storing the address of the alloca somewhere and using it 2287 // later. Since this is undefined, we'll just make it be null. 2288 if (!Tmp->use_empty()) 2289 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); 2290 delete Tmp; 2291 } 2292 } 2293 2294 /// EvaluateFunction - Evaluate a call to function F, returning true if 2295 /// successful, false if we can't evaluate it. ActualArgs contains the formal 2296 /// arguments for the function. 2297 bool EvaluateFunction(Function *F, Constant *&RetVal, 2298 const SmallVectorImpl<Constant*> &ActualArgs); 2299 2300 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if 2301 /// successful, false if we can't evaluate it. NewBB returns the next BB that 2302 /// control flows into, or null upon return. 2303 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB); 2304 2305 Constant *getVal(Value *V) { 2306 if (Constant *CV = dyn_cast<Constant>(V)) return CV; 2307 Constant *R = ValueStack.back()->lookup(V); 2308 assert(R && "Reference to an uncomputed value!"); 2309 return R; 2310 } 2311 2312 void setVal(Value *V, Constant *C) { 2313 ValueStack.back()->operator[](V) = C; 2314 } 2315 2316 const DenseMap<Constant*, Constant*> &getMutatedMemory() const { 2317 return MutatedMemory; 2318 } 2319 2320 const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const { 2321 return Invariants; 2322 } 2323 2324 private: 2325 Constant *ComputeLoadResult(Constant *P); 2326 2327 /// ValueStack - As we compute SSA register values, we store their contents 2328 /// here. The back of the vector contains the current function and the stack 2329 /// contains the values in the calling frames. 2330 SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack; 2331 2332 /// CallStack - This is used to detect recursion. In pathological situations 2333 /// we could hit exponential behavior, but at least there is nothing 2334 /// unbounded. 2335 SmallVector<Function*, 4> CallStack; 2336 2337 /// MutatedMemory - For each store we execute, we update this map. Loads 2338 /// check this to get the most up-to-date value. If evaluation is successful, 2339 /// this state is committed to the process. 2340 DenseMap<Constant*, Constant*> MutatedMemory; 2341 2342 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable 2343 /// to represent its body. This vector is needed so we can delete the 2344 /// temporary globals when we are done. 2345 SmallVector<GlobalVariable*, 32> AllocaTmps; 2346 2347 /// Invariants - These global variables have been marked invariant by the 2348 /// static constructor. 2349 SmallPtrSet<GlobalVariable*, 8> Invariants; 2350 2351 /// SimpleConstants - These are constants we have checked and know to be 2352 /// simple enough to live in a static initializer of a global. 2353 SmallPtrSet<Constant*, 8> SimpleConstants; 2354 2355 const DataLayout *DL; 2356 const TargetLibraryInfo *TLI; 2357 }; 2358 2359 } // anonymous namespace 2360 2361 /// ComputeLoadResult - Return the value that would be computed by a load from 2362 /// P after the stores reflected by 'memory' have been performed. If we can't 2363 /// decide, return null. 2364 Constant *Evaluator::ComputeLoadResult(Constant *P) { 2365 // If this memory location has been recently stored, use the stored value: it 2366 // is the most up-to-date. 2367 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P); 2368 if (I != MutatedMemory.end()) return I->second; 2369 2370 // Access it. 2371 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { 2372 if (GV->hasDefinitiveInitializer()) 2373 return GV->getInitializer(); 2374 return 0; 2375 } 2376 2377 // Handle a constantexpr getelementptr. 2378 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) 2379 if (CE->getOpcode() == Instruction::GetElementPtr && 2380 isa<GlobalVariable>(CE->getOperand(0))) { 2381 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); 2382 if (GV->hasDefinitiveInitializer()) 2383 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); 2384 } 2385 2386 return 0; // don't know how to evaluate. 2387 } 2388 2389 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if 2390 /// successful, false if we can't evaluate it. NewBB returns the next BB that 2391 /// control flows into, or null upon return. 2392 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, 2393 BasicBlock *&NextBB) { 2394 // This is the main evaluation loop. 2395 while (1) { 2396 Constant *InstResult = 0; 2397 2398 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); 2399 2400 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { 2401 if (!SI->isSimple()) { 2402 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); 2403 return false; // no volatile/atomic accesses. 2404 } 2405 Constant *Ptr = getVal(SI->getOperand(1)); 2406 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2407 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); 2408 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2409 DEBUG(dbgs() << "; To: " << *Ptr << "\n"); 2410 } 2411 if (!isSimpleEnoughPointerToCommit(Ptr)) { 2412 // If this is too complex for us to commit, reject it. 2413 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); 2414 return false; 2415 } 2416 2417 Constant *Val = getVal(SI->getOperand(0)); 2418 2419 // If this might be too difficult for the backend to handle (e.g. the addr 2420 // of one global variable divided by another) then we can't commit it. 2421 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) { 2422 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val 2423 << "\n"); 2424 return false; 2425 } 2426 2427 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2428 if (CE->getOpcode() == Instruction::BitCast) { 2429 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); 2430 // If we're evaluating a store through a bitcast, then we need 2431 // to pull the bitcast off the pointer type and push it onto the 2432 // stored value. 2433 Ptr = CE->getOperand(0); 2434 2435 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType(); 2436 2437 // In order to push the bitcast onto the stored value, a bitcast 2438 // from NewTy to Val's type must be legal. If it's not, we can try 2439 // introspecting NewTy to find a legal conversion. 2440 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { 2441 // If NewTy is a struct, we can convert the pointer to the struct 2442 // into a pointer to its first member. 2443 // FIXME: This could be extended to support arrays as well. 2444 if (StructType *STy = dyn_cast<StructType>(NewTy)) { 2445 NewTy = STy->getTypeAtIndex(0U); 2446 2447 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32); 2448 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); 2449 Constant * const IdxList[] = {IdxZero, IdxZero}; 2450 2451 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList); 2452 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) 2453 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2454 2455 // If we can't improve the situation by introspecting NewTy, 2456 // we have to give up. 2457 } else { 2458 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " 2459 "evaluate.\n"); 2460 return false; 2461 } 2462 } 2463 2464 // If we found compatible types, go ahead and push the bitcast 2465 // onto the stored value. 2466 Val = ConstantExpr::getBitCast(Val, NewTy); 2467 2468 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); 2469 } 2470 } 2471 2472 MutatedMemory[Ptr] = Val; 2473 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { 2474 InstResult = ConstantExpr::get(BO->getOpcode(), 2475 getVal(BO->getOperand(0)), 2476 getVal(BO->getOperand(1))); 2477 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult 2478 << "\n"); 2479 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { 2480 InstResult = ConstantExpr::getCompare(CI->getPredicate(), 2481 getVal(CI->getOperand(0)), 2482 getVal(CI->getOperand(1))); 2483 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult 2484 << "\n"); 2485 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { 2486 InstResult = ConstantExpr::getCast(CI->getOpcode(), 2487 getVal(CI->getOperand(0)), 2488 CI->getType()); 2489 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult 2490 << "\n"); 2491 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { 2492 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), 2493 getVal(SI->getOperand(1)), 2494 getVal(SI->getOperand(2))); 2495 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult 2496 << "\n"); 2497 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { 2498 Constant *P = getVal(GEP->getOperand(0)); 2499 SmallVector<Constant*, 8> GEPOps; 2500 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); 2501 i != e; ++i) 2502 GEPOps.push_back(getVal(*i)); 2503 InstResult = 2504 ConstantExpr::getGetElementPtr(P, GEPOps, 2505 cast<GEPOperator>(GEP)->isInBounds()); 2506 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult 2507 << "\n"); 2508 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { 2509 2510 if (!LI->isSimple()) { 2511 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); 2512 return false; // no volatile/atomic accesses. 2513 } 2514 2515 Constant *Ptr = getVal(LI->getOperand(0)); 2516 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { 2517 Ptr = ConstantFoldConstantExpression(CE, DL, TLI); 2518 DEBUG(dbgs() << "Found a constant pointer expression, constant " 2519 "folding: " << *Ptr << "\n"); 2520 } 2521 InstResult = ComputeLoadResult(Ptr); 2522 if (InstResult == 0) { 2523 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." 2524 "\n"); 2525 return false; // Could not evaluate load. 2526 } 2527 2528 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); 2529 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { 2530 if (AI->isArrayAllocation()) { 2531 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); 2532 return false; // Cannot handle array allocs. 2533 } 2534 Type *Ty = AI->getType()->getElementType(); 2535 AllocaTmps.push_back(new GlobalVariable(Ty, false, 2536 GlobalValue::InternalLinkage, 2537 UndefValue::get(Ty), 2538 AI->getName())); 2539 InstResult = AllocaTmps.back(); 2540 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); 2541 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { 2542 CallSite CS(CurInst); 2543 2544 // Debug info can safely be ignored here. 2545 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { 2546 DEBUG(dbgs() << "Ignoring debug info.\n"); 2547 ++CurInst; 2548 continue; 2549 } 2550 2551 // Cannot handle inline asm. 2552 if (isa<InlineAsm>(CS.getCalledValue())) { 2553 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); 2554 return false; 2555 } 2556 2557 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { 2558 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { 2559 if (MSI->isVolatile()) { 2560 DEBUG(dbgs() << "Can not optimize a volatile memset " << 2561 "intrinsic.\n"); 2562 return false; 2563 } 2564 Constant *Ptr = getVal(MSI->getDest()); 2565 Constant *Val = getVal(MSI->getValue()); 2566 Constant *DestVal = ComputeLoadResult(getVal(Ptr)); 2567 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { 2568 // This memset is a no-op. 2569 DEBUG(dbgs() << "Ignoring no-op memset.\n"); 2570 ++CurInst; 2571 continue; 2572 } 2573 } 2574 2575 if (II->getIntrinsicID() == Intrinsic::lifetime_start || 2576 II->getIntrinsicID() == Intrinsic::lifetime_end) { 2577 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); 2578 ++CurInst; 2579 continue; 2580 } 2581 2582 if (II->getIntrinsicID() == Intrinsic::invariant_start) { 2583 // We don't insert an entry into Values, as it doesn't have a 2584 // meaningful return value. 2585 if (!II->use_empty()) { 2586 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n"); 2587 return false; 2588 } 2589 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); 2590 Value *PtrArg = getVal(II->getArgOperand(1)); 2591 Value *Ptr = PtrArg->stripPointerCasts(); 2592 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { 2593 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType(); 2594 if (DL && !Size->isAllOnesValue() && 2595 Size->getValue().getLimitedValue() >= 2596 DL->getTypeStoreSize(ElemTy)) { 2597 Invariants.insert(GV); 2598 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV 2599 << "\n"); 2600 } else { 2601 DEBUG(dbgs() << "Found a global var, but can not treat it as an " 2602 "invariant.\n"); 2603 } 2604 } 2605 // Continue even if we do nothing. 2606 ++CurInst; 2607 continue; 2608 } 2609 2610 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); 2611 return false; 2612 } 2613 2614 // Resolve function pointers. 2615 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); 2616 if (!Callee || Callee->mayBeOverridden()) { 2617 DEBUG(dbgs() << "Can not resolve function pointer.\n"); 2618 return false; // Cannot resolve. 2619 } 2620 2621 SmallVector<Constant*, 8> Formals; 2622 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) 2623 Formals.push_back(getVal(*i)); 2624 2625 if (Callee->isDeclaration()) { 2626 // If this is a function we can constant fold, do it. 2627 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) { 2628 InstResult = C; 2629 DEBUG(dbgs() << "Constant folded function call. Result: " << 2630 *InstResult << "\n"); 2631 } else { 2632 DEBUG(dbgs() << "Can not constant fold function call.\n"); 2633 return false; 2634 } 2635 } else { 2636 if (Callee->getFunctionType()->isVarArg()) { 2637 DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); 2638 return false; 2639 } 2640 2641 Constant *RetVal = 0; 2642 // Execute the call, if successful, use the return value. 2643 ValueStack.push_back(new DenseMap<Value*, Constant*>); 2644 if (!EvaluateFunction(Callee, RetVal, Formals)) { 2645 DEBUG(dbgs() << "Failed to evaluate function.\n"); 2646 return false; 2647 } 2648 delete ValueStack.pop_back_val(); 2649 InstResult = RetVal; 2650 2651 if (InstResult != NULL) { 2652 DEBUG(dbgs() << "Successfully evaluated function. Result: " << 2653 InstResult << "\n\n"); 2654 } else { 2655 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); 2656 } 2657 } 2658 } else if (isa<TerminatorInst>(CurInst)) { 2659 DEBUG(dbgs() << "Found a terminator instruction.\n"); 2660 2661 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) { 2662 if (BI->isUnconditional()) { 2663 NextBB = BI->getSuccessor(0); 2664 } else { 2665 ConstantInt *Cond = 2666 dyn_cast<ConstantInt>(getVal(BI->getCondition())); 2667 if (!Cond) return false; // Cannot determine. 2668 2669 NextBB = BI->getSuccessor(!Cond->getZExtValue()); 2670 } 2671 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) { 2672 ConstantInt *Val = 2673 dyn_cast<ConstantInt>(getVal(SI->getCondition())); 2674 if (!Val) return false; // Cannot determine. 2675 NextBB = SI->findCaseValue(Val).getCaseSuccessor(); 2676 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) { 2677 Value *Val = getVal(IBI->getAddress())->stripPointerCasts(); 2678 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val)) 2679 NextBB = BA->getBasicBlock(); 2680 else 2681 return false; // Cannot determine. 2682 } else if (isa<ReturnInst>(CurInst)) { 2683 NextBB = 0; 2684 } else { 2685 // invoke, unwind, resume, unreachable. 2686 DEBUG(dbgs() << "Can not handle terminator."); 2687 return false; // Cannot handle this terminator. 2688 } 2689 2690 // We succeeded at evaluating this block! 2691 DEBUG(dbgs() << "Successfully evaluated block.\n"); 2692 return true; 2693 } else { 2694 // Did not know how to evaluate this! 2695 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction." 2696 "\n"); 2697 return false; 2698 } 2699 2700 if (!CurInst->use_empty()) { 2701 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult)) 2702 InstResult = ConstantFoldConstantExpression(CE, DL, TLI); 2703 2704 setVal(CurInst, InstResult); 2705 } 2706 2707 // If we just processed an invoke, we finished evaluating the block. 2708 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) { 2709 NextBB = II->getNormalDest(); 2710 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n"); 2711 return true; 2712 } 2713 2714 // Advance program counter. 2715 ++CurInst; 2716 } 2717 } 2718 2719 /// EvaluateFunction - Evaluate a call to function F, returning true if 2720 /// successful, false if we can't evaluate it. ActualArgs contains the formal 2721 /// arguments for the function. 2722 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, 2723 const SmallVectorImpl<Constant*> &ActualArgs) { 2724 // Check to see if this function is already executing (recursion). If so, 2725 // bail out. TODO: we might want to accept limited recursion. 2726 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end()) 2727 return false; 2728 2729 CallStack.push_back(F); 2730 2731 // Initialize arguments to the incoming values specified. 2732 unsigned ArgNo = 0; 2733 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E; 2734 ++AI, ++ArgNo) 2735 setVal(AI, ActualArgs[ArgNo]); 2736 2737 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such, 2738 // we can only evaluate any one basic block at most once. This set keeps 2739 // track of what we have executed so we can detect recursive cases etc. 2740 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks; 2741 2742 // CurBB - The current basic block we're evaluating. 2743 BasicBlock *CurBB = F->begin(); 2744 2745 BasicBlock::iterator CurInst = CurBB->begin(); 2746 2747 while (1) { 2748 BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings. 2749 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n"); 2750 2751 if (!EvaluateBlock(CurInst, NextBB)) 2752 return false; 2753 2754 if (NextBB == 0) { 2755 // Successfully running until there's no next block means that we found 2756 // the return. Fill it the return value and pop the call stack. 2757 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator()); 2758 if (RI->getNumOperands()) 2759 RetVal = getVal(RI->getOperand(0)); 2760 CallStack.pop_back(); 2761 return true; 2762 } 2763 2764 // Okay, we succeeded in evaluating this control flow. See if we have 2765 // executed the new block before. If so, we have a looping function, 2766 // which we cannot evaluate in reasonable time. 2767 if (!ExecutedBlocks.insert(NextBB)) 2768 return false; // looped! 2769 2770 // Okay, we have never been in this block before. Check to see if there 2771 // are any PHI nodes. If so, evaluate them with information about where 2772 // we came from. 2773 PHINode *PN = 0; 2774 for (CurInst = NextBB->begin(); 2775 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst) 2776 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB))); 2777 2778 // Advance to the next block. 2779 CurBB = NextBB; 2780 } 2781 } 2782 2783 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if 2784 /// we can. Return true if we can, false otherwise. 2785 static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL, 2786 const TargetLibraryInfo *TLI) { 2787 // Call the function. 2788 Evaluator Eval(DL, TLI); 2789 Constant *RetValDummy; 2790 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy, 2791 SmallVector<Constant*, 0>()); 2792 2793 if (EvalSuccess) { 2794 // We succeeded at evaluation: commit the result. 2795 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '" 2796 << F->getName() << "' to " << Eval.getMutatedMemory().size() 2797 << " stores.\n"); 2798 for (DenseMap<Constant*, Constant*>::const_iterator I = 2799 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end(); 2800 I != E; ++I) 2801 CommitValueTo(I->second, I->first); 2802 for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I = 2803 Eval.getInvariants().begin(), E = Eval.getInvariants().end(); 2804 I != E; ++I) 2805 (*I)->setConstant(true); 2806 } 2807 2808 return EvalSuccess; 2809 } 2810 2811 /// OptimizeGlobalCtorsList - Simplify and evaluation global ctors if possible. 2812 /// Return true if anything changed. 2813 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) { 2814 std::vector<Function*> Ctors = ParseGlobalCtors(GCL); 2815 bool MadeChange = false; 2816 if (Ctors.empty()) return false; 2817 2818 // Loop over global ctors, optimizing them when we can. 2819 for (unsigned i = 0; i != Ctors.size(); ++i) { 2820 Function *F = Ctors[i]; 2821 // Found a null terminator in the middle of the list, prune off the rest of 2822 // the list. 2823 if (F == 0) { 2824 if (i != Ctors.size()-1) { 2825 Ctors.resize(i+1); 2826 MadeChange = true; 2827 } 2828 break; 2829 } 2830 DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n"); 2831 2832 // We cannot simplify external ctor functions. 2833 if (F->empty()) continue; 2834 2835 // If we can evaluate the ctor at compile time, do. 2836 if (EvaluateStaticConstructor(F, DL, TLI)) { 2837 Ctors.erase(Ctors.begin()+i); 2838 MadeChange = true; 2839 --i; 2840 ++NumCtorsEvaluated; 2841 continue; 2842 } 2843 } 2844 2845 if (!MadeChange) return false; 2846 2847 GCL = InstallGlobalCtors(GCL, Ctors); 2848 return true; 2849 } 2850 2851 static int compareNames(Constant *const *A, Constant *const *B) { 2852 return (*A)->getName().compare((*B)->getName()); 2853 } 2854 2855 static void setUsedInitializer(GlobalVariable &V, 2856 SmallPtrSet<GlobalValue *, 8> Init) { 2857 if (Init.empty()) { 2858 V.eraseFromParent(); 2859 return; 2860 } 2861 2862 // Type of pointer to the array of pointers. 2863 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0); 2864 2865 SmallVector<llvm::Constant *, 8> UsedArray; 2866 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end(); 2867 I != E; ++I) { 2868 Constant *Cast 2869 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(*I, Int8PtrTy); 2870 UsedArray.push_back(Cast); 2871 } 2872 // Sort to get deterministic order. 2873 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames); 2874 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size()); 2875 2876 Module *M = V.getParent(); 2877 V.removeFromParent(); 2878 GlobalVariable *NV = 2879 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage, 2880 llvm::ConstantArray::get(ATy, UsedArray), ""); 2881 NV->takeName(&V); 2882 NV->setSection("llvm.metadata"); 2883 delete &V; 2884 } 2885 2886 namespace { 2887 /// \brief An easy to access representation of llvm.used and llvm.compiler.used. 2888 class LLVMUsed { 2889 SmallPtrSet<GlobalValue *, 8> Used; 2890 SmallPtrSet<GlobalValue *, 8> CompilerUsed; 2891 GlobalVariable *UsedV; 2892 GlobalVariable *CompilerUsedV; 2893 2894 public: 2895 LLVMUsed(Module &M) { 2896 UsedV = collectUsedGlobalVariables(M, Used, false); 2897 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true); 2898 } 2899 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator; 2900 iterator usedBegin() { return Used.begin(); } 2901 iterator usedEnd() { return Used.end(); } 2902 iterator compilerUsedBegin() { return CompilerUsed.begin(); } 2903 iterator compilerUsedEnd() { return CompilerUsed.end(); } 2904 bool usedCount(GlobalValue *GV) const { return Used.count(GV); } 2905 bool compilerUsedCount(GlobalValue *GV) const { 2906 return CompilerUsed.count(GV); 2907 } 2908 bool usedErase(GlobalValue *GV) { return Used.erase(GV); } 2909 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); } 2910 bool usedInsert(GlobalValue *GV) { return Used.insert(GV); } 2911 bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); } 2912 2913 void syncVariablesAndSets() { 2914 if (UsedV) 2915 setUsedInitializer(*UsedV, Used); 2916 if (CompilerUsedV) 2917 setUsedInitializer(*CompilerUsedV, CompilerUsed); 2918 } 2919 }; 2920 } 2921 2922 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) { 2923 if (GA.use_empty()) // No use at all. 2924 return false; 2925 2926 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) && 2927 "We should have removed the duplicated " 2928 "element from llvm.compiler.used"); 2929 if (!GA.hasOneUse()) 2930 // Strictly more than one use. So at least one is not in llvm.used and 2931 // llvm.compiler.used. 2932 return true; 2933 2934 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used. 2935 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA); 2936 } 2937 2938 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V, 2939 const LLVMUsed &U) { 2940 unsigned N = 2; 2941 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) && 2942 "We should have removed the duplicated " 2943 "element from llvm.compiler.used"); 2944 if (U.usedCount(&V) || U.compilerUsedCount(&V)) 2945 ++N; 2946 return V.hasNUsesOrMore(N); 2947 } 2948 2949 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) { 2950 if (!GA.hasLocalLinkage()) 2951 return true; 2952 2953 return U.usedCount(&GA) || U.compilerUsedCount(&GA); 2954 } 2955 2956 static bool hasUsesToReplace(GlobalAlias &GA, LLVMUsed &U, bool &RenameTarget) { 2957 RenameTarget = false; 2958 bool Ret = false; 2959 if (hasUseOtherThanLLVMUsed(GA, U)) 2960 Ret = true; 2961 2962 // If the alias is externally visible, we may still be able to simplify it. 2963 if (!mayHaveOtherReferences(GA, U)) 2964 return Ret; 2965 2966 // If the aliasee has internal linkage, give it the name and linkage 2967 // of the alias, and delete the alias. This turns: 2968 // define internal ... @f(...) 2969 // @a = alias ... @f 2970 // into: 2971 // define ... @a(...) 2972 Constant *Aliasee = GA.getAliasee(); 2973 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 2974 if (!Target->hasLocalLinkage()) 2975 return Ret; 2976 2977 // Do not perform the transform if multiple aliases potentially target the 2978 // aliasee. This check also ensures that it is safe to replace the section 2979 // and other attributes of the aliasee with those of the alias. 2980 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U)) 2981 return Ret; 2982 2983 RenameTarget = true; 2984 return true; 2985 } 2986 2987 bool GlobalOpt::OptimizeGlobalAliases(Module &M) { 2988 bool Changed = false; 2989 LLVMUsed Used(M); 2990 2991 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Used.usedBegin(), 2992 E = Used.usedEnd(); 2993 I != E; ++I) 2994 Used.compilerUsedErase(*I); 2995 2996 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); 2997 I != E;) { 2998 Module::alias_iterator J = I++; 2999 // Aliases without names cannot be referenced outside this module. 3000 if (!J->hasName() && !J->isDeclaration()) 3001 J->setLinkage(GlobalValue::InternalLinkage); 3002 // If the aliasee may change at link time, nothing can be done - bail out. 3003 if (J->mayBeOverridden()) 3004 continue; 3005 3006 Constant *Aliasee = J->getAliasee(); 3007 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts()); 3008 Target->removeDeadConstantUsers(); 3009 3010 // Make all users of the alias use the aliasee instead. 3011 bool RenameTarget; 3012 if (!hasUsesToReplace(*J, Used, RenameTarget)) 3013 continue; 3014 3015 J->replaceAllUsesWith(Aliasee); 3016 ++NumAliasesResolved; 3017 Changed = true; 3018 3019 if (RenameTarget) { 3020 // Give the aliasee the name, linkage and other attributes of the alias. 3021 Target->takeName(J); 3022 Target->setLinkage(J->getLinkage()); 3023 Target->setVisibility(J->getVisibility()); 3024 Target->setDLLStorageClass(J->getDLLStorageClass()); 3025 3026 if (Used.usedErase(J)) 3027 Used.usedInsert(Target); 3028 3029 if (Used.compilerUsedErase(J)) 3030 Used.compilerUsedInsert(Target); 3031 } else if (mayHaveOtherReferences(*J, Used)) 3032 continue; 3033 3034 // Delete the alias. 3035 M.getAliasList().erase(J); 3036 ++NumAliasesRemoved; 3037 Changed = true; 3038 } 3039 3040 Used.syncVariablesAndSets(); 3041 3042 return Changed; 3043 } 3044 3045 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) { 3046 if (!TLI->has(LibFunc::cxa_atexit)) 3047 return 0; 3048 3049 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit)); 3050 3051 if (!Fn) 3052 return 0; 3053 3054 FunctionType *FTy = Fn->getFunctionType(); 3055 3056 // Checking that the function has the right return type, the right number of 3057 // parameters and that they all have pointer types should be enough. 3058 if (!FTy->getReturnType()->isIntegerTy() || 3059 FTy->getNumParams() != 3 || 3060 !FTy->getParamType(0)->isPointerTy() || 3061 !FTy->getParamType(1)->isPointerTy() || 3062 !FTy->getParamType(2)->isPointerTy()) 3063 return 0; 3064 3065 return Fn; 3066 } 3067 3068 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++ 3069 /// destructor and can therefore be eliminated. 3070 /// Note that we assume that other optimization passes have already simplified 3071 /// the code so we only look for a function with a single basic block, where 3072 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and 3073 /// other side-effect free instructions. 3074 static bool cxxDtorIsEmpty(const Function &Fn, 3075 SmallPtrSet<const Function *, 8> &CalledFunctions) { 3076 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and 3077 // nounwind, but that doesn't seem worth doing. 3078 if (Fn.isDeclaration()) 3079 return false; 3080 3081 if (++Fn.begin() != Fn.end()) 3082 return false; 3083 3084 const BasicBlock &EntryBlock = Fn.getEntryBlock(); 3085 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end(); 3086 I != E; ++I) { 3087 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 3088 // Ignore debug intrinsics. 3089 if (isa<DbgInfoIntrinsic>(CI)) 3090 continue; 3091 3092 const Function *CalledFn = CI->getCalledFunction(); 3093 3094 if (!CalledFn) 3095 return false; 3096 3097 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions); 3098 3099 // Don't treat recursive functions as empty. 3100 if (!NewCalledFunctions.insert(CalledFn)) 3101 return false; 3102 3103 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions)) 3104 return false; 3105 } else if (isa<ReturnInst>(*I)) 3106 return true; // We're done. 3107 else if (I->mayHaveSideEffects()) 3108 return false; // Destructor with side effects, bail. 3109 } 3110 3111 return false; 3112 } 3113 3114 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { 3115 /// Itanium C++ ABI p3.3.5: 3116 /// 3117 /// After constructing a global (or local static) object, that will require 3118 /// destruction on exit, a termination function is registered as follows: 3119 /// 3120 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d ); 3121 /// 3122 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the 3123 /// call f(p) when DSO d is unloaded, before all such termination calls 3124 /// registered before this one. It returns zero if registration is 3125 /// successful, nonzero on failure. 3126 3127 // This pass will look for calls to __cxa_atexit where the function is trivial 3128 // and remove them. 3129 bool Changed = false; 3130 3131 for (Function::use_iterator I = CXAAtExitFn->use_begin(), 3132 E = CXAAtExitFn->use_end(); I != E;) { 3133 // We're only interested in calls. Theoretically, we could handle invoke 3134 // instructions as well, but neither llvm-gcc nor clang generate invokes 3135 // to __cxa_atexit. 3136 CallInst *CI = dyn_cast<CallInst>(*I++); 3137 if (!CI) 3138 continue; 3139 3140 Function *DtorFn = 3141 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts()); 3142 if (!DtorFn) 3143 continue; 3144 3145 SmallPtrSet<const Function *, 8> CalledFunctions; 3146 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions)) 3147 continue; 3148 3149 // Just remove the call. 3150 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType())); 3151 CI->eraseFromParent(); 3152 3153 ++NumCXXDtorsRemoved; 3154 3155 Changed |= true; 3156 } 3157 3158 return Changed; 3159 } 3160 3161 bool GlobalOpt::runOnModule(Module &M) { 3162 bool Changed = false; 3163 3164 DL = getAnalysisIfAvailable<DataLayout>(); 3165 TLI = &getAnalysis<TargetLibraryInfo>(); 3166 3167 // Try to find the llvm.globalctors list. 3168 GlobalVariable *GlobalCtors = FindGlobalCtors(M); 3169 3170 bool LocalChange = true; 3171 while (LocalChange) { 3172 LocalChange = false; 3173 3174 // Delete functions that are trivially dead, ccc -> fastcc 3175 LocalChange |= OptimizeFunctions(M); 3176 3177 // Optimize global_ctors list. 3178 if (GlobalCtors) 3179 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors); 3180 3181 // Optimize non-address-taken globals. 3182 LocalChange |= OptimizeGlobalVars(M); 3183 3184 // Resolve aliases, when possible. 3185 LocalChange |= OptimizeGlobalAliases(M); 3186 3187 // Try to remove trivial global destructors if they are not removed 3188 // already. 3189 Function *CXAAtExitFn = FindCXAAtExit(M, TLI); 3190 if (CXAAtExitFn) 3191 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn); 3192 3193 Changed |= LocalChange; 3194 } 3195 3196 // TODO: Move all global ctors functions to the end of the module for code 3197 // layout. 3198 3199 return Changed; 3200 } 3201