//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

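// Value handler for outgoing values (currently only return values): each
// value assigned to a physical register is copied into that register, and the
// register is added as a use operand of the instruction being built.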
struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg);
    MIRBuilder.buildCopy(PhysReg, ValVReg);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

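// Lower a return. A missing return value simply becomes S_ENDPGM; otherwise
// only shader calling conventions are supported, and each piece of the return
// value is copied into the register chosen by the return calling convention.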
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  if (!Val) {
    MIRBuilder.buildInstr(AMDGPU::S_ENDPGM).addImm(0);
    return true;
  }

  Register VReg = VRegs[0];

  const Function &F = MF.getFunction();
  auto &DL = F.getParent()->getDataLayout();
  if (!AMDGPU::isShader(F.getCallingConv()))
    return false;

  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ArgInfo OrigArg{VReg, Val->getType()};
  setArgFlags(OrigArg, AttributeList::ReturnIndex, DL, F);
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  SmallVector<ArgInfo, 8> SplitArgs;
  CCAssignFn *AssignFn = CCAssignFnForReturn(F.getCallingConv(),
                                             /*IsVarArg=*/false);
  for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(F.getContext());
    SplitArgs.push_back({VRegs[i], SplitTy, OrigArg.Flags, OrigArg.IsFixed});
  }
  auto RetInstr = MIRBuilder.buildInstrNoInsert(AMDGPU::SI_RETURN_TO_EPILOG);
  OutgoingArgHandler Handler(MIRBuilder, MRI, RetInstr, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;
  MIRBuilder.insertInstr(RetInstr);

  return true;
}

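// Build a pointer to the kernel argument at byte offset Offset: materialize
// the offset as a constant and G_GEP it onto the preloaded kernarg segment
// pointer.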
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

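// Load the kernel argument of type ParamTy at byte offset Offset into DstReg.
// Roughly, this emits:
//   %off:_(s64) = G_CONSTANT Offset
//   %ptr = G_GEP %kernarg_segment_ptr, %off
//   %dst = G_LOAD %ptr ; invariant, non-temporal load from the kernarg segment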
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MONonTemporal |
                                       MachineMemOperand::MOInvariant,
                                       TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

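// Return the first SGPR that the calling convention has not allocated yet,
// for system inputs that have no fixed register assignment.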
static Register findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg))
      return AMDGPU::SGPR0 + Reg;
  }
  llvm_unreachable("Cannot allocate sgpr");
}

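// Allocate the special "system" SGPR inputs that are set up for the wave
// rather than passed as user arguments; at the moment only the scratch wave
// byte offset is handled here.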
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

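// Lower incoming formal arguments. Kernel arguments live in the kernarg
// segment in memory and are loaded explicitly; shader arguments are assigned
// to registers by the calling convention.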
bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  // AMDGPU_GS and AMDGPU_HS are not supported yet.
  if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
      F.getCallingConv() == CallingConv::AMDGPU_HS)
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  bool IsShader = AMDGPU::isShader(F.getCallingConv());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    Register DispatchPtrReg = Info->addDispatchPtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    Register QueuePtrReg = Info->addQueuePtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    Register InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(ConstPtrTy);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    Register DispatchIDReg = Info->addDispatchID(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (F.getCallingConv() == CallingConv::AMDGPU_KERNEL) {
    unsigned i = 0;
    const unsigned KernArgBaseAlign = 16;
    const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
    uint64_t ExplicitArgOffset = 0;

    // TODO: Align down to dword alignment and extract bits for extending loads.
    for (auto &Arg : F.args()) {
      Type *ArgTy = Arg.getType();
      unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
      if (AllocSize == 0)
        continue;

      unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

      uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
      ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;
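      // For example, with a zero BaseOffset, a kernel taking (i8, i64) places
      // the i8 at offset 0 and aligns the i64 up to offset 8, leaving
      // ExplicitArgOffset at 16 for any following argument.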

      ArrayRef<Register> OrigArgRegs = VRegs[i];
      Register ArgReg =
          OrigArgRegs.size() == 1
              ? OrigArgRegs[0]
              : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
      unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
      ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
      lowerParameter(MIRBuilder, ArgTy, ArgOffset, Align, ArgReg);
      if (OrigArgRegs.size() > 1)
        unpackRegs(OrigArgRegs, ArgReg, ArgTy, MIRBuilder);
      ++i;
    }

    allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), IsShader);
    return true;
  }

  unsigned NumArgs = F.arg_size();
  Function::const_arg_iterator CurOrigArg = F.arg_begin();
  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  unsigned PSInputNum = 0;
  BitVector Skipped(NumArgs);
  for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
    EVT ValEVT = TLI.getValueType(DL, CurOrigArg->getType());

    // We can only handle simple value types at the moment.
    ISD::ArgFlagsTy Flags;
    assert(VRegs[i].size() == 1 && "Can't lower into more than one register");
    ArgInfo OrigArg{VRegs[i][0], CurOrigArg->getType()};
    setArgFlags(OrigArg, i + 1, DL, F);
    Flags.setOrigAlign(DL.getABITypeAlignment(CurOrigArg->getType()));

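    // For pixel shaders, skip inputs that have no users so the hardware does
    // not have to initialize the corresponding VGPRs, while still tracking
    // which PS inputs are allocated and enabled.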
    if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
        !OrigArg.Flags.isInReg() && !OrigArg.Flags.isByVal() &&
        PSInputNum <= 15) {
      if (CurOrigArg->use_empty() && !Info->isPSInputAllocated(PSInputNum)) {
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (!CurOrigArg->use_empty())
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    CCAssignFn *AssignFn = CCAssignFnForCall(F.getCallingConv(),
                                             /*IsVarArg=*/false);

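    // Note: CCAssignFn returns true if it *failed* to assign a location for
    // the value, so a true result means this argument cannot be lowered.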
    if (ValEVT.isVector()) {
      if (!ValEVT.isSimple())
        return false;
      EVT ElemVT = ValEVT.getVectorElementType();
      MVT ValVT = ElemVT.getSimpleVT();
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full,
                          OrigArg.Flags, CCInfo);
      // Fail if we don't know how to handle this type.
      if (Res)
        return false;
    } else {
      if (!ValEVT.isSimple())
        return false;
      MVT ValVT = ValEVT.getSimpleVT();
      bool Res =
          AssignFn(i, ValVT, ValVT, CCValAssign::Full, OrigArg.Flags, CCInfo);

      // Fail if we don't know how to handle this type.
      if (Res)
        return false;
    }
  }

  Function::const_arg_iterator Arg = F.arg_begin();

  if (F.getCallingConv() == CallingConv::AMDGPU_VS ||
      F.getCallingConv() == CallingConv::AMDGPU_PS) {
    for (unsigned i = 0, OrigArgIdx = 0;
         OrigArgIdx != NumArgs && i != ArgLocs.size(); ++Arg, ++OrigArgIdx) {
      if (Skipped.test(OrigArgIdx))
        continue;
      assert(VRegs[OrigArgIdx].size() == 1 &&
             "Can't lower into more than 1 reg");
      CCValAssign &VA = ArgLocs[i++];
      MRI.addLiveIn(VA.getLocReg(), VRegs[OrigArgIdx][0]);
      MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
      MIRBuilder.buildCopy(VRegs[OrigArgIdx][0], VA.getLocReg());
    }

    allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), IsShader);
    return true;
  }

  return false;
}