//===-- AMDGPUMachineFunction.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUMachineFunction.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineModuleInfo.h"

using namespace llvm;

AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
  MachineFunctionInfo(),
  LocalMemoryObjects(),
  ExplicitKernArgSize(0),
  MaxKernArgAlign(0),
  LDSSize(0),
  IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
  MemoryBound(false),
  WaveLimiter(false) {
  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.

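  // If AMDGPUPerfHintAnalysis has already run, pick up its per-function
  // hints: whether this function is memory bound and whether it needs the
  // wave limiter.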
  if (auto *Resolver = MF.getMMI().getResolver()) {
    if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
          Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
      MemoryBound = PHA->isMemoryBound(&MF.getFunction());
      WaveLimiter = PHA->needsWaveLimiter(&MF.getFunction());
    }
  }
}

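// Allocate a fixed offset in this function's LDS for GV if it does not
// already have one, and return that offset.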
unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalValue &GV) {
  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Entry.second)
    return Entry.first->second;

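  // Respect an explicit alignment on the global if present; otherwise fall
  // back to the ABI alignment of its value type.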
  unsigned Align = GV.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(GV.getValueType());

  // TODO: We should sort these to minimize wasted space due to alignment
  // padding. Currently the padding is decided by the first encountered use
  // during lowering.
  unsigned Offset = LDSSize = alignTo(LDSSize, Align);

  Entry.first->second = Offset;
  LDSSize += DL.getTypeAllocSize(GV.getValueType());

  return Offset;
}