//===- CodeMetrics.cpp - Code cost measurements --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Function.h"
#include "llvm/Support/CallSite.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
/// TODO: Perhaps calls like memcpy, strcpy, etc?
bool llvm::callIsSmall(const Function *F) {
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
                                    const TargetData *TD) {
  ++NumBlocks;
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isa<PHINode>(II)) continue;           // PHI nodes don't count.

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      if (const IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(II)) {
        switch (IntrinsicI->getIntrinsicID()) {
        default: break;
        case Intrinsic::dbg_declare:
        case Intrinsic::dbg_value:
        case Intrinsic::invariant_start:
        case Intrinsic::invariant_end:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
        case Intrinsic::objectsize:
        case Intrinsic::ptr_annotation:
        case Intrinsic::var_annotation:
          // These intrinsics don't count as size.
          continue;
        }
      }

      ImmutableCallSite CS(cast<Instruction>(II));

      if (const Function *F = CS.getCalledFunction()) {
        // If a function is both internal and has a single use, then it is
        // extremely likely to get inlined in the future (it was probably
        // exposed by an interleaved devirtualization pass).
        if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
          ++NumInlineCandidates;

        // If this call is to the function itself, then the function is
        // recursive. Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;
      }

      if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();

        // We don't want inline asm to count as a call - that would prevent
        // loop unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
      // Noop casts, including ptr <-> int, don't count.
      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
          isa<PtrToIntInst>(CI))
        continue;
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (isa<TruncInst>(CI) && TD &&
          TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
        continue;
      // The result of a cmp instruction is often extended (to be used by
      // other cmp instructions, logical or return instructions). These are
      // usually nops on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        continue;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
      // If a GEP has all constant indices, it will probably be folded with
      // a load/store.
      if (GEPI->hasAllConstantIndices())
        continue;
    }

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions
  // with indirectbrs as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
  if (isa<IndirectBrInst>(BB->getTerminator()))
    containsIndirectBr = true;

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}

void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
  // If this function contains a call that "returns twice" (e.g., setjmp or
  // _setjmp) and it isn't marked with "returns twice" itself, never inline
  // it. This is a hack because we depend on the user marking their local
  // variables as volatile if they are live across a setjmp call, and they
  // probably won't do this in callers.
  exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
    !F->hasFnAttr(Attribute::ReturnsTwice);

  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB, TD);
}
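
// Illustrative sketch (not part of this file): a client heuristic, such as an
// inliner or loop unroller, would typically populate a CodeMetrics object via
// analyzeFunction and then consult its fields. Only the fields defined above
// are used; the threshold name below is hypothetical.
//
//   CodeMetrics Metrics;
//   Metrics.analyzeFunction(F, TD);
//   if (Metrics.isRecursive || Metrics.containsIndirectBr ||
//       Metrics.exposesReturnsTwice)
//     return false;                              // flagged as unprofitable/unsafe
//   return Metrics.NumInsts < HypotheticalSizeThreshold;  // hypothetical limit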