//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // Ty is a leaf (non-aggregate) type: it occupies exactly one linearized
  // slot, so account for it and return.
  return CurIndex + 1;
}
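
// Example (illustrative): given a StructType *Ty for { i32, { i8, i16 },
// float }, the member named by the index sequence {1, 1} (the i16 field) is
// the third scalar visited in a depth-first walk (after i32 and i8), so:
//
//   unsigned Idx[] = {1, 1};
//   unsigned Linear = ComputeLinearIndex(Ty, Idx, Idx + 2, 0); // Linear == 2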

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
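
// Example (illustrative, assuming a DataLayout in which i32 and float each
// occupy 4 bytes): for the type { i32, [2 x float] },
//
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offsets;
//   ComputeValueVTs(TLI, Ty, VTs, &Offsets);
//   // VTs     == { MVT::i32, MVT::f32, MVT::f32 }
//   // Offsets == { 0, 4, 8 }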

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
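
// Example (illustrative): isNoopBitcast returns true for a bitcast between
// two pointer types (e.g. i8* -> i32*), since all pointers share one machine
// representation, and for a bitcast between two vector types that are both
// legal for the target (e.g. <4 x i32> -> <2 x i64>). A cast such as
// i64 -> <2 x i32> is not recognized as a no-op here.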

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a
/// particular scalar component. This records its address; the reverse of this
/// list gives a sequence of indices appropriate for an extractvalue to locate
/// the important value. This value is updated during the function and on exit
/// will indicate similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
          cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
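
// Example (illustrative): in
//
//   %p = call i8* @f()
//   %q = getelementptr i8* %p, i32 0
//   %r = bitcast i8* %q to i32*
//
// getNoopInput(%r, ...) looks back through the pointer-to-pointer bitcast
// and the all-zero GEP, both of which are free at the machine level, and
// returns %p.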

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph
  // as possible, in the hope that it will intersect with the value produced
  // by the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search is blocked immediately and
  // the loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look
  // through extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
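
// Example (illustrative): in
//
//   %ret = tail call i32 @f()
//   %t = trunc i32 %ret to i16
//   ret i16 %t
//
// slotOnlyDiscardsData traces %t back through the trunc to the call itself.
// If the target allows truncating tail calls, the only effect of the code
// between the call and the ret is to discard the top 16 bits, so the call
// can still be a tail call.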

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back()).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return false. On a
/// true return, SubTypes.back()->getTypeAtIndex(Path.back()) is either an
/// empty aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
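
// Example (illustrative): iterating the leaf types of
// {[0 x i64], {{}, i32, {}}, i32} with firstRealType/nextRealType visits the
// inner i32 at Path == [1, 1] and then the outer i32 at Path == [2]; the
// zero-length array and the empty structs contribute no values.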

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  return returnTypeIsEligibleForTailCall(
      ExitBB->getParent(), I, Ret, *TM.getSubtargetImpl()->getTargetLowering());
}
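
// Example (illustrative): the call below satisfies the target-independent
// requirements, since nothing that needs scheduling appears between it and
// the return:
//
//   %ret = tail call i32 @callee()
//   ret i32 %ret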

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes; it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in the future who knows). It may be OK but
  // the only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    using std::copy;
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    copy(RetPath.rbegin(), RetPath.rend(), std::back_inserter(TmpRetPath));
    copy(CallPath.rbegin(), CallPath.rend(), std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;

  if (GV->hasUnnamedAddr())
    return true;

  // If it is a non-constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }

  // An alias can point to a variable. We could try to resolve the alias to
  // decide, but for now just don't hide them.
  if (isa<GlobalAlias>(GV))
    return false;

  GlobalStatus GS;
  if (GlobalStatus::analyzeGlobal(GV, GS))
    return false;

  return !GS.IsCompared;
}
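
// Example (illustrative): a global such as
//
//   @str = linkonce_odr unnamed_addr constant [4 x i8] c"abc\00"
//
// can be omitted from the symbol table: it is linkonce_odr (every defining
// translation unit must provide an identical copy) and unnamed_addr (its
// address is not significant), so no other object needs to find it by name.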