//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

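/// Value handler for outgoing values (return values and call arguments).
/// Extends each value to the width of its assigned location, copies it into
/// the physical register chosen by the calling convention, and records that
/// register as an implicit use on the return/call instruction \p MIB.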
struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

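/// Value handler for incoming values (formal arguments and call results).
/// Copies each value out of its assigned physical register or loads it from
/// its fixed stack slot, truncating back to the original type where the
/// calling convention widened it. Tracks the stack space consumed by
/// stack-passed arguments in \p StackUsed.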
struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
    : ValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
      MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size, 1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def on the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isIncomingArgumentHandler() const override { return true; }
};

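/// Incoming value handler for formal arguments: physical registers holding
/// incoming arguments are marked as live-in to the current basic block.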
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
    : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

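/// Break \p OrigArg into one ArgInfo per legalized value type for the calling
/// convention \p CallConv. For value types that must be passed in several
/// registers, fresh part registers are created and \p PerformArgSplit is
/// invoked so the caller can emit the code that packs or unpacks them.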
void AMDGPUCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    Type *Ty = VT.getTypeForEVT(Ctx);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g. [1 x
      // double] -> double).
      SplitArgs.emplace_back(OrigArg.Regs[SplitIdx], Ty,
                             OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    LLT LLTy = getLLTForType(*Ty, DL);

    SmallVector<Register, 8> SplitRegs;

    EVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
    Type *PartTy = PartVT.getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
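// For example, s32 with a factor of 4 gives s128, and <2 x s16> with a factor
// of 4 gives <8 x s16>.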
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// TODO: Move to generic code
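// Split the value in \p SrcReg (of type \p SrcTy) into the part registers
// \p DstRegs (each of type \p PartTy) expected by the calling convention. For
// example, an s64 value assigned to two s32 part registers is split with
// G_UNMERGE_VALUES; sizes that don't divide evenly go through an oversized
// temporary that the parts are then extracted from.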
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], Big, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
    OrigRetInfo, SplitRetInfos, DL, MRI, CC,
    [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
      unpackRegsToOrigType(B, Regs, VRegs[VTSplitIdx], LLTy, PartLLT);
    });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueHandler RetHandler(B, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

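/// Lower a return, either by ending the wave (kernels and shaders that return
/// void) or by emitting the appropriate return pseudo and lowering the return
/// value through lowerReturnVal.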
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {
  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  const auto &ST = B.getMF().getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

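/// Compute a pointer into the kernarg segment for the kernel argument at byte
/// offset \p Offset, by adding the offset to the preloaded kernarg segment
/// pointer.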
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
}

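/// Load the kernel argument of type \p ParamTy at byte offset \p Offset from
/// the kernarg segment into \p DstReg. The load is marked dereferenceable and
/// invariant, since kernel arguments never change during execution.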
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MODereferenceable |
                                       MachineMemOperand::MOInvariant,
                                       TypeSize, Align);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

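/// Lower the formal arguments of an amdgpu_kernel function. Kernel arguments
/// are not passed in registers; each one is loaded from the kernarg segment
/// at an offset derived from the argument's ABI alignment and allocation size.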
bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
      OrigArgRegs.size() == 1
      ? OrigArgRegs[0]
      : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(B, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

// TODO: Move this to generic code
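// Recombine the calling-convention part registers \p Regs (of type \p PartLLT)
// into the original value register \p OrigRegs[0] (of type \p LLTy), using
// merges, vector concatenation/build, or truncation as appropriate. This is
// the incoming-value counterpart of unpackRegsToOrigType.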
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    B.buildMerge(OrigRegs[0], Regs);
    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(LLTy.getElementType() == PartLLT.getElementType());

    int DstElts = LLTy.getNumElements();
    int PartElts = PartLLT.getNumElements();
    if (DstElts % PartElts == 0)
      B.buildConcatVectors(OrigRegs[0], Regs);
    else {
      // Deal with v3s16 split into v2s16
      assert(PartElts == 2 && DstElts % 2 != 0);
      int RoundedElts = PartElts * ((DstElts + PartElts - 1) / PartElts);

      LLT RoundedDestTy = LLT::vector(RoundedElts, PartLLT.getElementType());
      auto RoundedConcat = B.buildConcatVectors(RoundedDestTy, Regs);
      B.buildExtract(OrigRegs[0], RoundedConcat, 0);
    }

    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

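/// Lower formal arguments for non-kernel functions and shaders. Kernels are
/// handled separately by lowerFormalArgumentsKernel; everything else goes
/// through the normal calling convention machinery via handleAssignments.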
bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions are not implemented.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(
      OrigArg, SplitArgs, DL, MRI, CC,
      // FIXME: We should probably be passing multiple registers to
      // handleAssignments to do this
      [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                LLTy, PartLLT);
      });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // sets PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together.  (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
          countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}