//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index of the requested
      // element and recurse into that element with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
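// Worked example (illustrative): for a nested aggregate such as
// {i32, {i32, i32}, i32}, the flattened scalars are numbered 0..3 from left
// to right, so the index list {1, 0} linearizes to 1 and {2} linearizes to 3.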
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
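// Worked example (illustrative): given %T = {i32, [2 x float]} and a
// DataLayout where i32 and float are both 4 bytes, ComputeValueVTs produces
// ValueVTs = {i32, f32, f32} with byte Offsets {0, 4, 8}. computeValueLLTs
// visits the same scalars but reports Offsets in bits, i.e. {0, 32, 64}.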
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
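// Note (illustrative): isNoopBitcast treats a bitcast as free when it cannot
// change the underlying bits: identical types, any pointer-to-pointer cast,
// or, for instance, <4 x i32> -> <2 x i64> on a target where both vector
// types are legal.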
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a
/// particular scalar component. This records its address; the reverse of this
/// list gives a sequence of indices appropriate for an extractvalue to locate
/// the important value. This value is updated during the function and on exit
/// will indicate similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
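// Worked example (the IR below is illustrative):
//
//   %p = call i8* @f()
//   %q = bitcast i8* %p to i32*
//   %r = getelementptr i32, i32* %q, i32 0
//
// Starting from %r, getNoopInput looks through the all-zero GEP and the
// pointer-to-pointer bitcast and returns the call itself, since neither
// operation changes the bits of the value.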
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by
  // the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search stops immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look
  // through extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}
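// Worked example (illustrative IR): in
//
//   %big = tail call i64 @callee()
//   %small = trunc i64 %big to i32
//   ret i32 %small
//
// the ret merely discards the top 32 bits of what the call provided, so
// slotOnlyDiscardsData returns true, assuming the target's
// allowTruncateForTailCall hook accepts the i64 -> i32 truncation and no
// zeroext/signext attribute forces the sizes to match.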
/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
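// Worked example (illustrative): together firstRealType/nextRealType
// enumerate the scalar leaves of an aggregate in extractvalue order. For
// {i32, {float, i64}} the iteration visits the i32 (Path = [0]), the float
// (Path = [1, 0]) and the i64 (Path = [1, 1]), after which nextRealType
// returns false.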
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      // A lifetime end intrinsic should not stop tail call optimization.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
        if (II->getIntrinsicID() == Intrinsic::lifetime_end)
          continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
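// Illustrative example: in
//
//   define i32 @caller() {
//     %r = tail call i32 @callee()
//     ret i32 %r
//   }
//
// the call satisfies the target-independent requirements: the block ends in a
// ret and nothing that touches memory sits between the call and the return.
// Interposing a store or another side-effecting call between the two would
// typically make isInTailCallPosition return false.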
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // NoAlias and NonNull are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}
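// Illustrative counterexample: if the caller is declared
// "define zeroext i8 @caller()" but the call site lacks the zeroext return
// attribute, the caller would have to re-extend the result after the call,
// so attributesPermitTailCall returns false.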
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be
  // expanded as memcpy in libc, which returns the first argument. On
  // other platforms like arm-none-eabi, memcpy may be expanded as a
  // library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        RetVal == Call->getArgOperand(0))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}
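// Note (illustrative): collectEHScopeMembers is a straightforward worklist
// flood-fill. It labels every block reachable from MBB with the given scope
// number, refusing to enter blocks that begin a different scope (EH pads
// other than MBB itself) and stopping at EH scope returns, where control
// transfers back to the parent scope.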
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}