150f02cb2SNick Lewycky //===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
2450aa64fSDan Gohman //
3450aa64fSDan Gohman //                     The LLVM Compiler Infrastructure
4450aa64fSDan Gohman //
5450aa64fSDan Gohman // This file is distributed under the University of Illinois Open Source
6450aa64fSDan Gohman // License. See LICENSE.TXT for details.
7450aa64fSDan Gohman //
8450aa64fSDan Gohman //===----------------------------------------------------------------------===//
9450aa64fSDan Gohman //
// This file defines several CodeGen-specific LLVM IR analysis utilities.
11450aa64fSDan Gohman //
12450aa64fSDan Gohman //===----------------------------------------------------------------------===//
13450aa64fSDan Gohman 
14450aa64fSDan Gohman #include "llvm/CodeGen/Analysis.h"
1575d7d5e9SDan Gohman #include "llvm/Analysis/ValueTracking.h"
16450aa64fSDan Gohman #include "llvm/DerivedTypes.h"
17450aa64fSDan Gohman #include "llvm/Function.h"
18450aa64fSDan Gohman #include "llvm/Instructions.h"
19450aa64fSDan Gohman #include "llvm/IntrinsicInst.h"
20450aa64fSDan Gohman #include "llvm/LLVMContext.h"
21450aa64fSDan Gohman #include "llvm/Module.h"
22450aa64fSDan Gohman #include "llvm/CodeGen/MachineFunction.h"
23d4b0873cSEvan Cheng #include "llvm/CodeGen/SelectionDAG.h"
24450aa64fSDan Gohman #include "llvm/Target/TargetData.h"
25450aa64fSDan Gohman #include "llvm/Target/TargetLowering.h"
26450aa64fSDan Gohman #include "llvm/Target/TargetOptions.h"
27450aa64fSDan Gohman #include "llvm/Support/ErrorHandling.h"
28450aa64fSDan Gohman #include "llvm/Support/MathExtras.h"
29450aa64fSDan Gohman using namespace llvm;
30450aa64fSDan Gohman 
31450aa64fSDan Gohman /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
32450aa64fSDan Gohman /// of insertvalue or extractvalue indices that identify a member, return
33450aa64fSDan Gohman /// the linearized index of the start of the member.
34450aa64fSDan Gohman ///
35229907cdSChris Lattner unsigned llvm::ComputeLinearIndex(Type *Ty,
36450aa64fSDan Gohman                                   const unsigned *Indices,
37450aa64fSDan Gohman                                   const unsigned *IndicesEnd,
38450aa64fSDan Gohman                                   unsigned CurIndex) {
39450aa64fSDan Gohman   // Base case: We're done.
40450aa64fSDan Gohman   if (Indices && Indices == IndicesEnd)
41450aa64fSDan Gohman     return CurIndex;
42450aa64fSDan Gohman 
43450aa64fSDan Gohman   // Given a struct type, recursively traverse the elements.
44229907cdSChris Lattner   if (StructType *STy = dyn_cast<StructType>(Ty)) {
45450aa64fSDan Gohman     for (StructType::element_iterator EB = STy->element_begin(),
46450aa64fSDan Gohman                                       EI = EB,
47450aa64fSDan Gohman                                       EE = STy->element_end();
48450aa64fSDan Gohman         EI != EE; ++EI) {
49450aa64fSDan Gohman       if (Indices && *Indices == unsigned(EI - EB))
50aadc5596SDan Gohman         return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
51aadc5596SDan Gohman       CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
52450aa64fSDan Gohman     }
53450aa64fSDan Gohman     return CurIndex;
54450aa64fSDan Gohman   }
55450aa64fSDan Gohman   // Given an array type, recursively traverse the elements.
56229907cdSChris Lattner   else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
57229907cdSChris Lattner     Type *EltTy = ATy->getElementType();
58450aa64fSDan Gohman     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
59450aa64fSDan Gohman       if (Indices && *Indices == i)
60aadc5596SDan Gohman         return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
61aadc5596SDan Gohman       CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
62450aa64fSDan Gohman     }
63450aa64fSDan Gohman     return CurIndex;
64450aa64fSDan Gohman   }
65450aa64fSDan Gohman   // We haven't found the type we're looking for, so keep searching.
66450aa64fSDan Gohman   return CurIndex + 1;
67450aa64fSDan Gohman }
68450aa64fSDan Gohman 
69450aa64fSDan Gohman /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
70450aa64fSDan Gohman /// EVTs that represent all the individual underlying
71450aa64fSDan Gohman /// non-aggregate types that comprise it.
72450aa64fSDan Gohman ///
73450aa64fSDan Gohman /// If Offsets is non-null, it points to a vector to be filled in
74450aa64fSDan Gohman /// with the in-memory offsets of each of the individual values.
75450aa64fSDan Gohman ///
76229907cdSChris Lattner void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
77450aa64fSDan Gohman                            SmallVectorImpl<EVT> &ValueVTs,
78450aa64fSDan Gohman                            SmallVectorImpl<uint64_t> *Offsets,
79450aa64fSDan Gohman                            uint64_t StartingOffset) {
80450aa64fSDan Gohman   // Given a struct type, recursively traverse the elements.
81229907cdSChris Lattner   if (StructType *STy = dyn_cast<StructType>(Ty)) {
82450aa64fSDan Gohman     const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
83450aa64fSDan Gohman     for (StructType::element_iterator EB = STy->element_begin(),
84450aa64fSDan Gohman                                       EI = EB,
85450aa64fSDan Gohman                                       EE = STy->element_end();
86450aa64fSDan Gohman          EI != EE; ++EI)
87450aa64fSDan Gohman       ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
88450aa64fSDan Gohman                       StartingOffset + SL->getElementOffset(EI - EB));
89450aa64fSDan Gohman     return;
90450aa64fSDan Gohman   }
91450aa64fSDan Gohman   // Given an array type, recursively traverse the elements.
92229907cdSChris Lattner   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
93229907cdSChris Lattner     Type *EltTy = ATy->getElementType();
94450aa64fSDan Gohman     uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
95450aa64fSDan Gohman     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
96450aa64fSDan Gohman       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
97450aa64fSDan Gohman                       StartingOffset + i * EltSize);
98450aa64fSDan Gohman     return;
99450aa64fSDan Gohman   }
100450aa64fSDan Gohman   // Interpret void as zero return values.
101450aa64fSDan Gohman   if (Ty->isVoidTy())
102450aa64fSDan Gohman     return;
103450aa64fSDan Gohman   // Base case: we can get an EVT for this LLVM IR type.
104450aa64fSDan Gohman   ValueVTs.push_back(TLI.getValueType(Ty));
105450aa64fSDan Gohman   if (Offsets)
106450aa64fSDan Gohman     Offsets->push_back(StartingOffset);
107450aa64fSDan Gohman }
108450aa64fSDan Gohman 
109450aa64fSDan Gohman /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
110450aa64fSDan Gohman GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
111450aa64fSDan Gohman   V = V->stripPointerCasts();
112450aa64fSDan Gohman   GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
113450aa64fSDan Gohman 
114fa60b0eeSBill Wendling   if (GV && GV->getName() == "llvm.eh.catch.all.value") {
115450aa64fSDan Gohman     assert(GV->hasInitializer() &&
116450aa64fSDan Gohman            "The EH catch-all value must have an initializer");
117450aa64fSDan Gohman     Value *Init = GV->getInitializer();
118450aa64fSDan Gohman     GV = dyn_cast<GlobalVariable>(Init);
119450aa64fSDan Gohman     if (!GV) V = cast<ConstantPointerNull>(Init);
120450aa64fSDan Gohman   }
121450aa64fSDan Gohman 
122450aa64fSDan Gohman   assert((GV || isa<ConstantPointerNull>(V)) &&
123450aa64fSDan Gohman          "TypeInfo must be a global variable or NULL");
124450aa64fSDan Gohman   return GV;
125450aa64fSDan Gohman }
126450aa64fSDan Gohman 
127450aa64fSDan Gohman /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
128450aa64fSDan Gohman /// processed uses a memory 'm' constraint.
129450aa64fSDan Gohman bool
130e8360b71SJohn Thompson llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
131450aa64fSDan Gohman                                 const TargetLowering &TLI) {
132450aa64fSDan Gohman   for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
133450aa64fSDan Gohman     InlineAsm::ConstraintInfo &CI = CInfos[i];
134450aa64fSDan Gohman     for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
135450aa64fSDan Gohman       TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
136450aa64fSDan Gohman       if (CType == TargetLowering::C_Memory)
137450aa64fSDan Gohman         return true;
138450aa64fSDan Gohman     }
139450aa64fSDan Gohman 
140450aa64fSDan Gohman     // Indirect operand accesses access memory.
141450aa64fSDan Gohman     if (CI.isIndirect)
142450aa64fSDan Gohman       return true;
143450aa64fSDan Gohman   }
144450aa64fSDan Gohman 
145450aa64fSDan Gohman   return false;
146450aa64fSDan Gohman }
147450aa64fSDan Gohman 
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  // Each IR fcmp predicate has a directly corresponding SelectionDAG
  // condition code; the ordered (O*) / unordered (U*) distinction is
  // preserved as-is.
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  // "ordered" (neither operand NaN) and "unordered" (some operand NaN)
  // map to the dedicated SETO / SETUO codes.
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  // All fcmp predicates are covered above, so anything else is malformed IR.
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}
17350f02cb2SNick Lewycky 
/// getFCmpCodeWithoutNaN - Given a floating-point ISD condition code,
/// collapse its ordered and unordered variants into the plain comparison
/// that ignores NaN behavior (e.g. SETOLT / SETULT both become SETLT).
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    // Codes without an ordered/unordered pairing are returned unchanged.
    default: return CC;
  }
}
185450aa64fSDan Gohman 
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  // Signed predicates map to the plain SET* codes; unsigned predicates
  // map to the SETU* codes.  Equality is sign-agnostic.
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  // All icmp predicates are covered above, so anything else is malformed IR.
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}
205450aa64fSDan Gohman 
2064f3615deSChris Lattner 
2074f3615deSChris Lattner /// getNoopInput - If V is a noop (i.e., lowers to no machine code), look
2084f3615deSChris Lattner /// through it (and any transitive noop operands to it) and return its input
2094f3615deSChris Lattner /// value.  This is used to determine if a tail call can be formed.
2104f3615deSChris Lattner ///
2114f3615deSChris Lattner static const Value *getNoopInput(const Value *V, const TargetLowering &TLI) {
2124f3615deSChris Lattner   // If V is not an instruction, it can't be looked through.
213182fe3eeSChris Lattner   const Instruction *I = dyn_cast<Instruction>(V);
214182fe3eeSChris Lattner   if (I == 0 || !I->hasOneUse() || I->getNumOperands() == 0) return V;
215182fe3eeSChris Lattner 
216182fe3eeSChris Lattner   Value *Op = I->getOperand(0);
2174f3615deSChris Lattner 
2184f3615deSChris Lattner   // Look through truly no-op truncates.
219182fe3eeSChris Lattner   if (isa<TruncInst>(I) &&
220182fe3eeSChris Lattner       TLI.isTruncateFree(I->getOperand(0)->getType(), I->getType()))
221182fe3eeSChris Lattner     return getNoopInput(I->getOperand(0), TLI);
2224f3615deSChris Lattner 
2234f3615deSChris Lattner   // Look through truly no-op bitcasts.
224182fe3eeSChris Lattner   if (isa<BitCastInst>(I)) {
225182fe3eeSChris Lattner     // No type change at all.
226182fe3eeSChris Lattner     if (Op->getType() == I->getType())
227182fe3eeSChris Lattner       return getNoopInput(Op, TLI);
228182fe3eeSChris Lattner 
2294f3615deSChris Lattner     // Pointer to pointer cast.
230182fe3eeSChris Lattner     if (Op->getType()->isPointerTy() && I->getType()->isPointerTy())
231182fe3eeSChris Lattner       return getNoopInput(Op, TLI);
232182fe3eeSChris Lattner 
233182fe3eeSChris Lattner     if (isa<VectorType>(Op->getType()) && isa<VectorType>(I->getType()) &&
234182fe3eeSChris Lattner         TLI.isTypeLegal(EVT::getEVT(Op->getType())) &&
235182fe3eeSChris Lattner         TLI.isTypeLegal(EVT::getEVT(I->getType())))
2364f3615deSChris Lattner       return getNoopInput(Op, TLI);
2374f3615deSChris Lattner   }
2384f3615deSChris Lattner 
239182fe3eeSChris Lattner   // Look through inttoptr.
240182fe3eeSChris Lattner   if (isa<IntToPtrInst>(I) && !isa<VectorType>(I->getType())) {
241182fe3eeSChris Lattner     // Make sure this isn't a truncating or extending cast.  We could support
242182fe3eeSChris Lattner     // this eventually, but don't bother for now.
243182fe3eeSChris Lattner     if (TLI.getPointerTy().getSizeInBits() ==
244182fe3eeSChris Lattner           cast<IntegerType>(Op->getType())->getBitWidth())
245182fe3eeSChris Lattner       return getNoopInput(Op, TLI);
246182fe3eeSChris Lattner   }
247182fe3eeSChris Lattner 
248182fe3eeSChris Lattner   // Look through ptrtoint.
249182fe3eeSChris Lattner   if (isa<PtrToIntInst>(I) && !isa<VectorType>(I->getType())) {
250182fe3eeSChris Lattner     // Make sure this isn't a truncating or extending cast.  We could support
251182fe3eeSChris Lattner     // this eventually, but don't bother for now.
252182fe3eeSChris Lattner     if (TLI.getPointerTy().getSizeInBits() ==
253182fe3eeSChris Lattner         cast<IntegerType>(I->getType())->getBitWidth())
254182fe3eeSChris Lattner       return getNoopInput(Op, TLI);
255182fe3eeSChris Lattner   }
256182fe3eeSChris Lattner 
257182fe3eeSChris Lattner 
2584f3615deSChris Lattner   // Otherwise it's not something we can look through.
2594f3615deSChris Lattner   return V;
2604f3615deSChris Lattner }
2614f3615deSChris Lattner 
2624f3615deSChris Lattner 
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
///
/// \param CS            the call being considered for tail-call optimization.
/// \param CalleeRetAttr the return attributes of the callee, compared
///                      against the caller's return attributes below.
/// \returns true if the call satisfies every target-independent
///          requirement for tail-call formation.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Walk backwards from the instruction just before the terminator
  // (prior(prior(end())) skips the terminator itself) until we reach I,
  // rejecting anything in between that would itself need a chain.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  // The xor keeps only the attributes on which caller and callee disagree.
  const Function *F = ExitBB->getParent();
  Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  // We handle two cases: multiple return values + scalars.
  Value *RetVal = Ret->getOperand(0);
  if (!isa<InsertValueInst>(RetVal) || !isa<StructType>(RetVal->getType()))
    // Handle scalars first.
    return getNoopInput(Ret->getOperand(0), TLI) == I;

  // If this is an aggregate return, look through the insert/extract values and
  // see if each is transparent.
  for (unsigned i = 0, e =cast<StructType>(RetVal->getType())->getNumElements();
       i != e; ++i) {
    // Find the scalar stored into struct slot i of the returned aggregate;
    // a missing slot means the return is not a direct copy of the call.
    const Value *InScalar = FindInsertedValue(RetVal, i);
    if (InScalar == 0) return false;
    InScalar = getNoopInput(InScalar, TLI);

    // If the scalar value being inserted is an extractvalue of the right index
    // from the call, then everything is good.
    const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(InScalar);
    if (EVI == 0 || EVI->getOperand(0) != I || EVI->getNumIndices() != 1 ||
        EVI->getIndices()[0] != i)
      return false;
  }

  return true;
}
349450aa64fSDan Gohman 
350d4b0873cSEvan Cheng bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
351f8bad080SEvan Cheng                                 SDValue &Chain, const TargetLowering &TLI) {
352d4b0873cSEvan Cheng   const Function *F = DAG.getMachineFunction().getFunction();
353d4b0873cSEvan Cheng 
354d4b0873cSEvan Cheng   // Conservatively require the attributes of the call to match those of
355d4b0873cSEvan Cheng   // the return. Ignore noalias because it doesn't affect the call sequence.
356a5054ad2SKostya Serebryany   Attributes CallerRetAttr = F->getAttributes().getRetAttributes();
357d4b0873cSEvan Cheng   if (CallerRetAttr & ~Attribute::NoAlias)
358d4b0873cSEvan Cheng     return false;
359d4b0873cSEvan Cheng 
360d4b0873cSEvan Cheng   // It's not safe to eliminate the sign / zero extension of the return value.
361d4b0873cSEvan Cheng   if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
362d4b0873cSEvan Cheng     return false;
363d4b0873cSEvan Cheng 
364d4b0873cSEvan Cheng   // Check if the only use is a function return node.
365f8bad080SEvan Cheng   return TLI.isUsedByReturnOnly(Node, Chain);
366d4b0873cSEvan Cheng }
367