//===-- AMDGPUMachineFunction.cpp -------------------------------------------=//
252ef4019SMatt Arsenault //
352ef4019SMatt Arsenault //                     The LLVM Compiler Infrastructure
452ef4019SMatt Arsenault //
552ef4019SMatt Arsenault // This file is distributed under the University of Illinois Open Source
652ef4019SMatt Arsenault // License. See LICENSE.TXT for details.
752ef4019SMatt Arsenault //
852ef4019SMatt Arsenault //===----------------------------------------------------------------------===//
952ef4019SMatt Arsenault 
1045bb48eaSTom Stellard #include "AMDGPUMachineFunction.h"
1152ef4019SMatt Arsenault #include "AMDGPUSubtarget.h"
121c538423SStanislav Mekhanoshin #include "AMDGPUPerfHintAnalysis.h"
131c538423SStanislav Mekhanoshin #include "llvm/CodeGen/MachineModuleInfo.h"
14e935f05aSMatt Arsenault 
1545bb48eaSTom Stellard using namespace llvm;
1645bb48eaSTom Stellard 
// Per-function AMDGPU state. Kernarg sizing is only computed for kernel
// calling conventions; entry-function status and FP options are snapshot
// from the MachineFunction at construction time.
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
  MachineFunctionInfo(),
  LocalMemoryObjects(),
  ExplicitKernArgSize(0),
  MaxKernArgAlign(0),
  LDSSize(0),
  // Entry functions (kernels and shader entry points) are identified by
  // calling convention.
  IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
  MemoryBound(false),
  WaveLimiter(false) {
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.
  const Function &F = MF.getFunction();

  // Pull perf-hint results (memory-bound / wave-limiter flags) from the
  // IR-level analysis if it has already been run; the resolver returns
  // null when the analysis is unavailable, in which case the conservative
  // defaults (false) set above remain.
  if (auto *Resolver = MF.getMMI().getResolver()) {
    if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
          Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
      MemoryBound = PHA->isMemoryBound(&F);
      WaveLimiter = PHA->needsWaveLimiter(&F);
    }
  }

  // Only compute-kernel calling conventions carry explicit kernel
  // arguments; ExplicitKernArgSize/MaxKernArgAlign stay 0 otherwise.
  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
    ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}
45beb24f5bSNikolay Haustov 
4652ef4019SMatt Arsenault unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
4752ef4019SMatt Arsenault                                                   const GlobalValue &GV) {
4852ef4019SMatt Arsenault   auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
4952ef4019SMatt Arsenault   if (!Entry.second)
5052ef4019SMatt Arsenault     return Entry.first->second;
5152ef4019SMatt Arsenault 
5252ef4019SMatt Arsenault   unsigned Align = GV.getAlignment();
5352ef4019SMatt Arsenault   if (Align == 0)
5452ef4019SMatt Arsenault     Align = DL.getABITypeAlignment(GV.getValueType());
5552ef4019SMatt Arsenault 
5652ef4019SMatt Arsenault   /// TODO: We should sort these to minimize wasted space due to alignment
5752ef4019SMatt Arsenault   /// padding. Currently the padding is decided by the first encountered use
5852ef4019SMatt Arsenault   /// during lowering.
5952ef4019SMatt Arsenault   unsigned Offset = LDSSize = alignTo(LDSSize, Align);
6052ef4019SMatt Arsenault 
6152ef4019SMatt Arsenault   Entry.first->second = Offset;
6252ef4019SMatt Arsenault   LDSSize += DL.getTypeAllocSize(GV.getValueType());
6352ef4019SMatt Arsenault 
6452ef4019SMatt Arsenault   return Offset;
65beb24f5bSNikolay Haustov }
66