//===-- AMDGPUMachineFunction.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUMachineFunction.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
using namespace llvm;

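// Gather the per-function state the AMDGPU backend queries during lowering:
// whether this is an entry function (derived from the calling convention),
// the memory-bound / wave-limiter hints from AMDGPUPerfHintAnalysis when the
// analysis is reachable through the MachineModuleInfo resolver, and the
// explicit kernel argument size and alignment for kernel calling conventions.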
AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
  MachineFunctionInfo(),
  LocalMemoryObjects(),
  ExplicitKernArgSize(0),
  MaxKernArgAlign(0),
  LDSSize(0),
  IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
  MemoryBound(false),
  WaveLimiter(false) {
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);

  // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
  // except reserved size is not correctly aligned.
  const Function &F = MF.getFunction();

  if (auto *Resolver = MF.getMMI().getResolver()) {
    if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
          Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
      MemoryBound = PHA->isMemoryBound(&F);
      WaveLimiter = PHA->needsWaveLimiter(&F);
    }
  }

  CallingConv::ID CC = F.getCallingConv();
  if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
    ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}

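// Assign GV a stable offset in LDS (local/group-shared memory), reusing the
// previously assigned offset if this global has already been allocated.
// Offsets are handed out in encounter order by a simple aligned bump
// allocator, so LDSSize ends up including any inter-global alignment padding.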
unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalValue &GV) {
  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Entry.second)
    return Entry.first->second;

  unsigned Align = GV.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(GV.getValueType());

  // TODO: We should sort these to minimize wasted space due to alignment
  // padding. Currently the padding is decided by the first encountered use
  // during lowering.
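  // For example (hypothetical numbers): with LDSSize currently 6 and a 4-byte
  // global at align 4, alignTo(6, 4) bumps LDSSize to 8, the global is placed
  // at offset 8, and LDSSize ends at 12, leaving bytes 6..7 as padding.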
  unsigned Offset = LDSSize = alignTo(LDSSize, Align);

  Entry.first->second = Offset;
  LDSSize += DL.getTypeAllocSize(GV.getValueType());

  return Offset;
}
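
// A minimal usage sketch (hypothetical caller, not part of this file): when
// lowering the address of a global in the LDS address space, a target can do
// roughly
//   unsigned Offset = MFI->allocateLDSGlobal(DL, GV);
// and then materialize Offset as the constant pointer value for that global.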