1 //===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// This file implements the lowering of LLVM calls to machine code calls for
12 /// GlobalISel.
13 ///
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUCallLowering.h"
17 #include "AMDGPU.h"
18 #include "AMDGPUISelLowering.h"
19 #include "AMDGPUSubtarget.h"
20 #include "SIISelLowering.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "SIRegisterInfo.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 
27 using namespace llvm;
28 
// Construct the AMDGPU call lowering implementation. Forwards the target
// lowering object to the generic GlobalISel CallLowering base and caches the
// target's address-space mapping (used when building kernarg pointers).
AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI), AMDGPUASI(TLI.getAMDGPUAS()) {
}
32 
33 bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
34                                      const Value *Val, unsigned VReg) const {
35   // FIXME: Add support for non-void returns.
36   if (Val)
37     return false;
38 
39   MIRBuilder.buildInstr(AMDGPU::S_ENDPGM);
40   return true;
41 }
42 
43 unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
44                                                Type *ParamTy,
45                                                unsigned Offset) const {
46 
47   MachineFunction &MF = MIRBuilder.getMF();
48   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
49   MachineRegisterInfo &MRI = MF.getRegInfo();
50   const Function &F = MF.getFunction();
51   const DataLayout &DL = F.getParent()->getDataLayout();
52   PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
53   LLT PtrType = getLLTForType(*PtrTy, DL);
54   unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
55   unsigned KernArgSegmentPtr =
56     MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
57   unsigned KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);
58 
59   unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
60   MIRBuilder.buildConstant(OffsetReg, Offset);
61 
62   MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);
63 
64   return DstReg;
65 }
66 
67 void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
68                                         Type *ParamTy, unsigned Offset,
69                                         unsigned DstReg) const {
70   MachineFunction &MF = MIRBuilder.getMF();
71   const Function &F = MF.getFunction();
72   const DataLayout &DL = F.getParent()->getDataLayout();
73   PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
74   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
75   unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
76   unsigned Align = DL.getABITypeAlignment(ParamTy);
77   unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
78 
79   MachineMemOperand *MMO =
80       MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
81                                        MachineMemOperand::MONonTemporal |
82                                        MachineMemOperand::MOInvariant,
83                                        TypeSize, Align);
84 
85   MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
86 }
87 
88 bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
89                                               const Function &F,
90                                               ArrayRef<unsigned> VRegs) const {
91   // AMDGPU_GS is not supported yet.
92   if (F.getCallingConv() == CallingConv::AMDGPU_GS)
93     return false;
94 
95   MachineFunction &MF = MIRBuilder.getMF();
96   const SISubtarget *Subtarget = static_cast<const SISubtarget *>(&MF.getSubtarget());
97   MachineRegisterInfo &MRI = MF.getRegInfo();
98   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
99   const SIRegisterInfo *TRI = MF.getSubtarget<SISubtarget>().getRegisterInfo();
100   const DataLayout &DL = F.getParent()->getDataLayout();
101 
102   SmallVector<CCValAssign, 16> ArgLocs;
103   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
104 
105   // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
106   if (Info->hasPrivateSegmentBuffer()) {
107     unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
108     MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
109     CCInfo.AllocateReg(PrivateSegmentBufferReg);
110   }
111 
112   if (Info->hasDispatchPtr()) {
113     unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
114     // FIXME: Need to add reg as live-in
115     CCInfo.AllocateReg(DispatchPtrReg);
116   }
117 
118   if (Info->hasQueuePtr()) {
119     unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
120     // FIXME: Need to add reg as live-in
121     CCInfo.AllocateReg(QueuePtrReg);
122   }
123 
124   if (Info->hasKernargSegmentPtr()) {
125     unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
126     const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
127     unsigned VReg = MRI.createGenericVirtualRegister(P2);
128     MRI.addLiveIn(InputPtrReg, VReg);
129     MIRBuilder.getMBB().addLiveIn(InputPtrReg);
130     MIRBuilder.buildCopy(VReg, InputPtrReg);
131     CCInfo.AllocateReg(InputPtrReg);
132   }
133 
134   if (Info->hasDispatchID()) {
135     unsigned DispatchIDReg = Info->addDispatchID(*TRI);
136     // FIXME: Need to add reg as live-in
137     CCInfo.AllocateReg(DispatchIDReg);
138   }
139 
140   if (Info->hasFlatScratchInit()) {
141     unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
142     // FIXME: Need to add reg as live-in
143     CCInfo.AllocateReg(FlatScratchInitReg);
144   }
145 
146   unsigned NumArgs = F.arg_size();
147   Function::const_arg_iterator CurOrigArg = F.arg_begin();
148   const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
149   unsigned PSInputNum = 0;
150   BitVector Skipped(NumArgs);
151   for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
152     EVT ValEVT = TLI.getValueType(DL, CurOrigArg->getType());
153 
154     // We can only hanlde simple value types at the moment.
155     ISD::ArgFlagsTy Flags;
156     ArgInfo OrigArg{VRegs[i], CurOrigArg->getType()};
157     setArgFlags(OrigArg, i + 1, DL, F);
158     Flags.setOrigAlign(DL.getABITypeAlignment(CurOrigArg->getType()));
159 
160     if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
161         !OrigArg.Flags.isInReg() && !OrigArg.Flags.isByVal() &&
162         PSInputNum <= 15) {
163       if (CurOrigArg->use_empty() && !Info->isPSInputAllocated(PSInputNum)) {
164         Skipped.set(i);
165         ++PSInputNum;
166         continue;
167       }
168 
169       Info->markPSInputAllocated(PSInputNum);
170       if (!CurOrigArg->use_empty())
171         Info->markPSInputEnabled(PSInputNum);
172 
173       ++PSInputNum;
174     }
175 
176     CCAssignFn *AssignFn = CCAssignFnForCall(F.getCallingConv(),
177                                              /*IsVarArg=*/false);
178 
179     if (ValEVT.isVector()) {
180       EVT ElemVT = ValEVT.getVectorElementType();
181       if (!ValEVT.isSimple())
182         return false;
183       MVT ValVT = ElemVT.getSimpleVT();
184       bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full,
185                           OrigArg.Flags, CCInfo);
186       if (!Res)
187         return false;
188     } else {
189       MVT ValVT = ValEVT.getSimpleVT();
190       if (!ValEVT.isSimple())
191         return false;
192       bool Res =
193           AssignFn(i, ValVT, ValVT, CCValAssign::Full, OrigArg.Flags, CCInfo);
194 
195       // Fail if we don't know how to handle this type.
196       if (Res)
197         return false;
198     }
199   }
200 
201   Function::const_arg_iterator Arg = F.arg_begin();
202 
203   if (F.getCallingConv() == CallingConv::AMDGPU_VS ||
204       F.getCallingConv() == CallingConv::AMDGPU_PS) {
205     for (unsigned i = 0, OrigArgIdx = 0;
206          OrigArgIdx != NumArgs && i != ArgLocs.size(); ++Arg, ++OrigArgIdx) {
207        if (Skipped.test(OrigArgIdx))
208           continue;
209       CCValAssign &VA = ArgLocs[i++];
210       MRI.addLiveIn(VA.getLocReg(), VRegs[OrigArgIdx]);
211       MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
212       MIRBuilder.buildCopy(VRegs[OrigArgIdx], VA.getLocReg());
213     }
214     return true;
215   }
216 
217   for (unsigned i = 0; i != ArgLocs.size(); ++i, ++Arg) {
218     // FIXME: We should be getting DebugInfo from the arguments some how.
219     CCValAssign &VA = ArgLocs[i];
220     lowerParameter(MIRBuilder, Arg->getType(),
221                    VA.getLocMemOffset() +
222                    Subtarget->getExplicitKernelArgOffset(MF), VRegs[i]);
223   }
224 
225   return true;
226 }
227