12cab237bSDimitry Andric //===- AMDGPUInline.cpp - Code to perform simple function inlining --------===//
22cab237bSDimitry Andric //
32cab237bSDimitry Andric //                     The LLVM Compiler Infrastructure
42cab237bSDimitry Andric //
52cab237bSDimitry Andric // This file is distributed under the University of Illinois Open Source
62cab237bSDimitry Andric // License. See LICENSE.TXT for details.
72cab237bSDimitry Andric //
82cab237bSDimitry Andric //===----------------------------------------------------------------------===//
92cab237bSDimitry Andric //
102cab237bSDimitry Andric /// \file
114ba319b5SDimitry Andric /// This is AMDGPU specific replacement of the standard inliner.
/// The main purpose is to account for the fact that calls are not only expensive
132cab237bSDimitry Andric /// on the AMDGPU, but much more expensive if a private memory pointer is
142cab237bSDimitry Andric /// passed to a function as an argument. In this situation, we are unable to
152cab237bSDimitry Andric /// eliminate private memory in the caller unless inlined and end up with slow
162cab237bSDimitry Andric /// and expensive scratch access. Thus, we boost the inline threshold for such
172cab237bSDimitry Andric /// functions here.
182cab237bSDimitry Andric ///
192cab237bSDimitry Andric //===----------------------------------------------------------------------===//
202cab237bSDimitry Andric 
212cab237bSDimitry Andric 
222cab237bSDimitry Andric #include "AMDGPU.h"
232cab237bSDimitry Andric #include "llvm/Transforms/IPO.h"
242cab237bSDimitry Andric #include "llvm/Analysis/AssumptionCache.h"
252cab237bSDimitry Andric #include "llvm/Analysis/CallGraph.h"
262cab237bSDimitry Andric #include "llvm/Analysis/InlineCost.h"
272cab237bSDimitry Andric #include "llvm/Analysis/ValueTracking.h"
282cab237bSDimitry Andric #include "llvm/Analysis/TargetTransformInfo.h"
292cab237bSDimitry Andric #include "llvm/IR/CallSite.h"
302cab237bSDimitry Andric #include "llvm/IR/DataLayout.h"
312cab237bSDimitry Andric #include "llvm/IR/Instructions.h"
322cab237bSDimitry Andric #include "llvm/IR/Module.h"
332cab237bSDimitry Andric #include "llvm/IR/Type.h"
342cab237bSDimitry Andric #include "llvm/Support/CommandLine.h"
352cab237bSDimitry Andric #include "llvm/Support/Debug.h"
362cab237bSDimitry Andric #include "llvm/Transforms/IPO/Inliner.h"
372cab237bSDimitry Andric 
382cab237bSDimitry Andric using namespace llvm;
392cab237bSDimitry Andric 
402cab237bSDimitry Andric #define DEBUG_TYPE "inline"
412cab237bSDimitry Andric 
422cab237bSDimitry Andric static cl::opt<int>
432cab237bSDimitry Andric ArgAllocaCost("amdgpu-inline-arg-alloca-cost", cl::Hidden, cl::init(2200),
442cab237bSDimitry Andric               cl::desc("Cost of alloca argument"));
452cab237bSDimitry Andric 
462cab237bSDimitry Andric // If the amount of scratch memory to eliminate exceeds our ability to allocate
47*b5893f02SDimitry Andric // it into registers we gain nothing by aggressively inlining functions for that
482cab237bSDimitry Andric // heuristic.
492cab237bSDimitry Andric static cl::opt<unsigned>
502cab237bSDimitry Andric ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden, cl::init(256),
512cab237bSDimitry Andric                 cl::desc("Maximum alloca size to use for inline cost"));
522cab237bSDimitry Andric 
namespace {

/// AMDGPU-specific replacement for the legacy inliner pass.
///
/// Wraps LegacyInlinerBase and overrides the cost computation so that call
/// sites passing pointers into private (scratch) memory get a boosted inline
/// threshold (see getInlineThreshold): unless such calls are inlined, the
/// private allocas in the caller cannot be eliminated and every access goes
/// to slow scratch memory.
class AMDGPUInliner : public LegacyInlinerBase {

public:
  AMDGPUInliner() : LegacyInlinerBase(ID) {
    initializeAMDGPUInlinerPass(*PassRegistry::getPassRegistry());
    // Capture the generic inline parameters once; getInlineCost clones and
    // adjusts them per call site.
    Params = getInlineParams();
  }

  static char ID; // Pass identification, replacement for typeid

  /// Compute the (possibly boosted) inline threshold for this call site.
  unsigned getInlineThreshold(CallSite CS) const;

  /// Target-specific inline cost; consults getInlineThreshold.
  InlineCost getInlineCost(CallSite CS) override;

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  // Cached by runOnSCC; getInlineCost uses it to query per-function TTI.
  TargetTransformInfoWrapperPass *TTIWP;

  // Baseline inline parameters captured at pass construction.
  InlineParams Params;
};

} // end anonymous namespace
802cab237bSDimitry Andric 
char AMDGPUInliner::ID = 0;
INITIALIZE_PASS_BEGIN(AMDGPUInliner, "amdgpu-inline",
                "AMDGPU Function Integration/Inlining", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUInliner, "amdgpu-inline",
                "AMDGPU Function Integration/Inlining", false, false)

// Factory used by the AMDGPU target to add this pass to the legacy pass
// manager pipeline.
Pass *llvm::createAMDGPUFunctionInliningPass() { return new AMDGPUInliner(); }
932cab237bSDimitry Andric 
bool AMDGPUInliner::runOnSCC(CallGraphSCC &SCC) {
  // Cache the TTI wrapper pass before delegating to the generic inliner
  // driver: getInlineCost (called back from the base class) reads TTIWP.
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  return LegacyInlinerBase::runOnSCC(SCC);
}
982cab237bSDimitry Andric 
void AMDGPUInliner::getAnalysisUsage(AnalysisUsage &AU) const {
  // Require TTI on top of whatever the generic inliner needs (assumption
  // cache, call graph, profile summary, ...).
  AU.addRequired<TargetTransformInfoWrapperPass>();
  LegacyInlinerBase::getAnalysisUsage(AU);
}
1032cab237bSDimitry Andric 
getInlineThreshold(CallSite CS) const1042cab237bSDimitry Andric unsigned AMDGPUInliner::getInlineThreshold(CallSite CS) const {
1052cab237bSDimitry Andric   int Thres = Params.DefaultThreshold;
1062cab237bSDimitry Andric 
1072cab237bSDimitry Andric   Function *Caller = CS.getCaller();
1082cab237bSDimitry Andric   // Listen to the inlinehint attribute when it would increase the threshold
1092cab237bSDimitry Andric   // and the caller does not need to minimize its size.
1102cab237bSDimitry Andric   Function *Callee = CS.getCalledFunction();
1112cab237bSDimitry Andric   bool InlineHint = Callee && !Callee->isDeclaration() &&
1122cab237bSDimitry Andric     Callee->hasFnAttribute(Attribute::InlineHint);
1132cab237bSDimitry Andric   if (InlineHint && Params.HintThreshold && Params.HintThreshold > Thres
1142cab237bSDimitry Andric       && !Caller->hasFnAttribute(Attribute::MinSize))
1152cab237bSDimitry Andric     Thres = Params.HintThreshold.getValue();
1162cab237bSDimitry Andric 
1172cab237bSDimitry Andric   const DataLayout &DL = Caller->getParent()->getDataLayout();
1182cab237bSDimitry Andric   if (!Callee)
1192cab237bSDimitry Andric     return (unsigned)Thres;
1202cab237bSDimitry Andric 
1212cab237bSDimitry Andric   // If we have a pointer to private array passed into a function
1222cab237bSDimitry Andric   // it will not be optimized out, leaving scratch usage.
1232cab237bSDimitry Andric   // Increase the inline threshold to allow inliniting in this case.
1242cab237bSDimitry Andric   uint64_t AllocaSize = 0;
1252cab237bSDimitry Andric   SmallPtrSet<const AllocaInst *, 8> AIVisited;
1262cab237bSDimitry Andric   for (Value *PtrArg : CS.args()) {
1272cab237bSDimitry Andric     Type *Ty = PtrArg->getType();
1282cab237bSDimitry Andric     if (!Ty->isPointerTy() ||
129*b5893f02SDimitry Andric         Ty->getPointerAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
1302cab237bSDimitry Andric       continue;
1312cab237bSDimitry Andric     PtrArg = GetUnderlyingObject(PtrArg, DL);
1322cab237bSDimitry Andric     if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
1332cab237bSDimitry Andric       if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1342cab237bSDimitry Andric         continue;
1352cab237bSDimitry Andric       AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1362cab237bSDimitry Andric       // If the amount of stack memory is excessive we will not be able
1372cab237bSDimitry Andric       // to get rid of the scratch anyway, bail out.
1382cab237bSDimitry Andric       if (AllocaSize > ArgAllocaCutoff) {
1392cab237bSDimitry Andric         AllocaSize = 0;
1402cab237bSDimitry Andric         break;
1412cab237bSDimitry Andric       }
1422cab237bSDimitry Andric     }
1432cab237bSDimitry Andric   }
1442cab237bSDimitry Andric   if (AllocaSize)
1452cab237bSDimitry Andric     Thres += ArgAllocaCost;
1462cab237bSDimitry Andric 
1472cab237bSDimitry Andric   return (unsigned)Thres;
1482cab237bSDimitry Andric }
1492cab237bSDimitry Andric 
1502cab237bSDimitry Andric // Check if call is just a wrapper around another call.
1512cab237bSDimitry Andric // In this case we only have call and ret instructions.
isWrapperOnlyCall(CallSite CS)1522cab237bSDimitry Andric static bool isWrapperOnlyCall(CallSite CS) {
1532cab237bSDimitry Andric   Function *Callee = CS.getCalledFunction();
1542cab237bSDimitry Andric   if (!Callee || Callee->size() != 1)
1552cab237bSDimitry Andric     return false;
1562cab237bSDimitry Andric   const BasicBlock &BB = Callee->getEntryBlock();
1572cab237bSDimitry Andric   if (const Instruction *I = BB.getFirstNonPHI()) {
1582cab237bSDimitry Andric     if (!isa<CallInst>(I)) {
1592cab237bSDimitry Andric       return false;
1602cab237bSDimitry Andric     }
1612cab237bSDimitry Andric     if (isa<ReturnInst>(*std::next(I->getIterator()))) {
1624ba319b5SDimitry Andric       LLVM_DEBUG(dbgs() << "    Wrapper only call detected: "
1632cab237bSDimitry Andric                         << Callee->getName() << '\n');
1642cab237bSDimitry Andric       return true;
1652cab237bSDimitry Andric     }
1662cab237bSDimitry Andric   }
1672cab237bSDimitry Andric   return false;
1682cab237bSDimitry Andric }
1692cab237bSDimitry Andric 
getInlineCost(CallSite CS)1702cab237bSDimitry Andric InlineCost AMDGPUInliner::getInlineCost(CallSite CS) {
1712cab237bSDimitry Andric   Function *Callee = CS.getCalledFunction();
1722cab237bSDimitry Andric   Function *Caller = CS.getCaller();
1732cab237bSDimitry Andric   TargetTransformInfo &TTI = TTIWP->getTTI(*Callee);
1742cab237bSDimitry Andric 
175*b5893f02SDimitry Andric   if (!Callee || Callee->isDeclaration())
176*b5893f02SDimitry Andric     return llvm::InlineCost::getNever("undefined callee");
177*b5893f02SDimitry Andric 
178*b5893f02SDimitry Andric   if (CS.isNoInline())
179*b5893f02SDimitry Andric     return llvm::InlineCost::getNever("noinline");
180*b5893f02SDimitry Andric 
181*b5893f02SDimitry Andric   if (!TTI.areInlineCompatible(Caller, Callee))
182*b5893f02SDimitry Andric     return llvm::InlineCost::getNever("incompatible");
1832cab237bSDimitry Andric 
1842cab237bSDimitry Andric   if (CS.hasFnAttr(Attribute::AlwaysInline)) {
1852cab237bSDimitry Andric     if (isInlineViable(*Callee))
186*b5893f02SDimitry Andric       return llvm::InlineCost::getAlways("alwaysinline viable");
187*b5893f02SDimitry Andric     return llvm::InlineCost::getNever("alwaysinline unviable");
1882cab237bSDimitry Andric   }
1892cab237bSDimitry Andric 
1902cab237bSDimitry Andric   if (isWrapperOnlyCall(CS))
191*b5893f02SDimitry Andric     return llvm::InlineCost::getAlways("wrapper-only call");
1922cab237bSDimitry Andric 
1932cab237bSDimitry Andric   InlineParams LocalParams = Params;
1942cab237bSDimitry Andric   LocalParams.DefaultThreshold = (int)getInlineThreshold(CS);
1952cab237bSDimitry Andric   bool RemarksEnabled = false;
1962cab237bSDimitry Andric   const auto &BBs = Caller->getBasicBlockList();
1972cab237bSDimitry Andric   if (!BBs.empty()) {
1982cab237bSDimitry Andric     auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &BBs.front());
1992cab237bSDimitry Andric     if (DI.isEnabled())
2002cab237bSDimitry Andric       RemarksEnabled = true;
2012cab237bSDimitry Andric   }
2022cab237bSDimitry Andric 
2032cab237bSDimitry Andric   OptimizationRemarkEmitter ORE(Caller);
2042cab237bSDimitry Andric   std::function<AssumptionCache &(Function &)> GetAssumptionCache =
2052cab237bSDimitry Andric       [this](Function &F) -> AssumptionCache & {
2062cab237bSDimitry Andric     return ACT->getAssumptionCache(F);
2072cab237bSDimitry Andric   };
2082cab237bSDimitry Andric 
2092cab237bSDimitry Andric   return llvm::getInlineCost(CS, Callee, LocalParams, TTI, GetAssumptionCache,
2102cab237bSDimitry Andric                              None, PSI, RemarksEnabled ? &ORE : nullptr);
2112cab237bSDimitry Andric }
212