//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
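///
/// For example (illustrative only): given %T = type { i32, [2 x i64], float },
/// the scalar leaves linearize as i32 -> 0, i64 -> 1, i64 -> 2, float -> 3, so
/// for the index list {2} (the float member) this returns 3.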
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when advancing by one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // If the index is inside the array, compute the offset of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // Base case: a non-aggregate type occupies exactly one linear slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
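/// For example (a sketch, assuming a typical 64-bit DataLayout): for the type
/// { i32, [2 x float] }, this appends the EVTs for i32, float and float to
/// ValueVTs, with Offsets of 0, 4 and 8 bytes respectively.
///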
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

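/// Compute the LLTs (for GlobalISel) that make up an LLVM IR type, analogously
/// to ComputeValueVTs above. Note that, unlike ComputeValueVTs, the offsets
/// recorded here are in bits rather than bytes (StartingOffset itself is still
/// a byte offset). For example (a sketch, assuming a typical 64-bit
/// DataLayout): { i32, i32 } yields ValueTys = [s32, s32] and
/// Offsets = [0, 32].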
void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + SL->getElementOffset(I));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

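/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code, strip
/// out its ordered/unordered aspect and return the plain comparison, e.g. both
/// SETOLT and SETULT become SETLT. Codes without an unordered counterpart are
/// returned unchanged. This is only meaningful when NaN operands can be
/// disregarded (for instance under nnan fast-math flags).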
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
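///
/// For example (illustrative only): tracing %agg with ValLoc = {1} through
///   %agg = insertvalue { i32, i64 } %x, i64 %y, 1
/// returns %y with ValLoc emptied, because the slot of interest is exactly the
/// inserted scalar.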
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
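///
/// For example (illustrative only), in
///   %big = tail call i64 @f()
///   %small = trunc i64 %big to i32
///   ret i32 %small
/// the truncation merely discards bits, so the sequence may still qualify if
/// the target reports the truncation as free.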
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search is blocked immediately and the loop is a
  // no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
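///
/// For example (illustrative only): with SubTypes = [{i8, {i16, i32}}] and
/// Path = [0] (representing the i8), one call advances the iterator to the
/// i16, leaving SubTypes = [{i8, {i16, i32}}, {i16, i32}] and Path = [1, 0].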
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and set up the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would set
/// up Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the
/// first i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
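///
/// For example (illustrative only):
///   %r = tail call i32 @f()
///   ret i32 %r
/// is a valid tail-call position, whereas any instruction with side effects
/// between the call and the ret would disqualify it.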
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between the call and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    if (isa<DbgInfoIntrinsic>(BBI))
      continue;
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (isa<PseudoProbeInst>(BBI))
      continue;
    // A lifetime end or assume intrinsic should not stop tail call
    // optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // The following attributes are completely benign as far as calling
  // convention goes; they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);
  CallerAttrs.removeAttribute(Attribute::Dereferenceable);
  CalleeAttrs.removeAttribute(Attribute::Dereferenceable);
  CallerAttrs.removeAttribute(Attribute::DereferenceableOrNull);
  CalleeAttrs.removeAttribute(Attribute::DereferenceableOrNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in the future who knows). It may be OK but
  // the only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
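///
/// For example (illustrative only), given
///   %b = bitcast i32* %a to i8*
/// this returns true for (A = %a, B = %b).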
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms,
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, such as __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

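/// Flood-fill from MBB, adding every reachable basic block to the scope
/// numbered EHScope. The walk skips blocks that begin a different scope and
/// stops at EH-scope return blocks, so membership never leaks across scope
/// boundaries.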
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH, so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH scope entry blocks.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}