//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit
/// register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}
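
// Example (illustrative MIR, register names hypothetical): for an s16 value
// assigned to a 32-bit location, extendRegisterMin32 emits
//   %ext:_(s32) = G_ANYEXT %val:_(s16)
// and the handlers below then copy the result into the physical register.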

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the
    // value ends up in a VGPR.
    // FIXME: Assert this is a shader return.
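    // Illustrative MIR for the SGPR case (register names hypothetical):
    //   %sgpr:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.readfirstlane), %ext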
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
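      // Illustrative MIR for a signext s16 argument arriving in $vgpr0
      // (register names hypothetical):
      //   %copy:_(s32) = COPY $vgpr0
      //   %hint:_(s32) = G_ASSERT_SEXT %copy, 16
      //   %arg:_(s16) = G_TRUNC %hint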
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

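    // Illustrative MIR for a non-tail call with a stack argument at offset 16
    // under flat scratch (register names hypothetical):
    //   %sp:_(p5) = COPY $sgpr32
    //   %off:_(s32) = G_CONSTANT i32 16
    //   %addr:_(p5) = G_PTR_ADD %sp, %off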
    if (!SPReg) {
      const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
      if (ST.enableFlatScratch()) {
        // The stack is accessed unswizzled, so we can use a regular copy.
        SPReg = MIRBuilder.buildCopy(PtrTy,
                                     MFI->getStackPtrOffsetReg()).getReg(0);
      } else {
        // The address we produce here, without knowing the use context, is
        // going to be interpreted as a vector address, so we need to convert
        // to a swizzled address.
        SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
                                      {MFI->getStackPtrOffsetReg()}).getReg(0);
      }
    }

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders, vector types should be explicitly handled by the calling
  // convention.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

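    // Small scalar integer returns are widened per their signext/zeroext
    // attribute; e.g. an i16 zeroext return value is typically widened to i32
    // with G_ZEXT before being assigned to the return register.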
    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                       CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = B.getMF();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
  auto Ret = B.buildInstrNoInsert(ReturnOpc);

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}
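
// Illustrative MIR emitted by lowerParameterPtr for Offset = 8 (register
// names hypothetical):
//   %off:_(s64) = G_CONSTANT i64 8
//   %dst:_(p4) = G_PTR_ADD %kernarg_segment_ptr, %off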

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}
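
// For an aggregate argument such as {i32, i64}, splitToValueTypes in the loop
// above yields two pieces with FieldOffsets {0, 8}, so two invariant loads are
// emitted from the kernarg segment at Offset + 0 and Offset + 8.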

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
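  // Worked example: for kernel arguments (i32, i8, i64) the loop below
  // computes explicit offsets 0, 4, and 8 (the i64 is padded up to its 8-byte
  // ABI alignment), each relative to BaseOffset.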
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : None;
    Align ABIAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: This probably isn't defined for mesa
  if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to non-graphics functions are not implemented yet.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (Register R : VRegs[Idx])
          B.buildUndef(R);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the
      // frontend set up an input arg for a particular interpolation mode, but
      // nothing uses that input arg. Really we should have an earlier pass
      // that removes such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackOffset = Assigner.StackOffset;

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should keep
  // track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackOffset);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(
    MachineIRBuilder &MIRBuilder, CCState &CCInfo,
    SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR and
  // doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo
      = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
      AMDGPUFunctionArgInfo::DISPATCH_PTR,
      AMDGPUFunctionArgInfo::QUEUE_PTR,
      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
      AMDGPUFunctionArgInfo::DISPATCH_ID,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z,
      AMDGPUFunctionArgInfo::LDS_KERNEL_ID,
  };

  static constexpr StringLiteral ImplicitAttrNames[] = {
      "amdgpu-no-dispatch-ptr",
      "amdgpu-no-queue-ptr",
      "amdgpu-no-implicitarg-ptr",
      "amdgpu-no-dispatch-id",
      "amdgpu-no-workgroup-id-x",
      "amdgpu-no-workgroup-id-y",
      "amdgpu-no-workgroup-id-z",
      "amdgpu-no-lds-kernel-id",
  };
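
  // Note: ImplicitAttrNames is kept in lockstep with InputRegs above; the
  // loop below advances a shared index I so that each input is paired with
  // its corresponding "amdgpu-no-*" call-site attribute.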

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI
      = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    } else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
      Optional<uint32_t> Id =
          AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
      if (Id.has_value()) {
        MIRBuilder.buildConstant(InputReg, Id.value());
      } else {
        MIRBuilder.buildUndef(InputReg);
      }
    } else {
      // We may have proven the input wasn't needed, although the ABI is
      // requiring it. We just need to allocate the register appropriately.
      MIRBuilder.buildUndef(InputReg);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register or pass it as is if already
  // packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");
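
  // The "amdgpu-no-workitem-id-*" attributes are normally inferred by the
  // AMDGPU attributor when an ID is provably unused, letting us skip
  // materializing and packing that component.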

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                         std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
    } else {
      InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
    }
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }
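
  // At this point InputReg, if set, holds the packed workitem IDs:
  //   bits [9:0]   workitem ID X
  //   bits [19:10] workitem ID Y
  //   bits [29:20] workitem ID Z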
90561f1f2a2SMatt Arsenault
906dc2457c8SMatt Arsenault if (!InputReg &&
907dc2457c8SMatt Arsenault (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
90861f1f2a2SMatt Arsenault InputReg = MRI.createGenericVirtualRegister(S32);
909dc2457c8SMatt Arsenault if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
910dc2457c8SMatt Arsenault // We're in a situation where the outgoing function requires the workitem
911dc2457c8SMatt Arsenault // ID, but the calling function does not have it (e.g a graphics function
912dc2457c8SMatt Arsenault // calling a C calling convention function). This is illegal, but we need
913dc2457c8SMatt Arsenault // to produce something.
914dc2457c8SMatt Arsenault MIRBuilder.buildUndef(InputReg);
915dc2457c8SMatt Arsenault } else {
91661f1f2a2SMatt Arsenault       // Workitem IDs are already packed; any one of the present incoming
91761f1f2a2SMatt Arsenault       // arguments will carry all of the required fields.
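                                 // A mask of ~0u selects the entire 32-bit packed value.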
91861f1f2a2SMatt Arsenault ArgDescriptor IncomingArg = ArgDescriptor::createArg(
91961f1f2a2SMatt Arsenault IncomingArgX ? *IncomingArgX :
92061f1f2a2SMatt Arsenault IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
921200bb519SMatt Arsenault LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
922200bb519SMatt Arsenault &AMDGPU::VGPR_32RegClass, S32);
92361f1f2a2SMatt Arsenault }
924dc2457c8SMatt Arsenault }
92561f1f2a2SMatt Arsenault
92661f1f2a2SMatt Arsenault if (OutgoingArg->isRegister()) {
9270197cd0bSMatt Arsenault if (InputReg)
92861f1f2a2SMatt Arsenault ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
9290197cd0bSMatt Arsenault
93061f1f2a2SMatt Arsenault if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
93161f1f2a2SMatt Arsenault report_fatal_error("failed to allocate implicit input argument");
93261f1f2a2SMatt Arsenault } else {
93361f1f2a2SMatt Arsenault LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
93461f1f2a2SMatt Arsenault return false;
93561f1f2a2SMatt Arsenault }
93661f1f2a2SMatt Arsenault
93761f1f2a2SMatt Arsenault return true;
93861f1f2a2SMatt Arsenault }
93961f1f2a2SMatt Arsenault
94061f1f2a2SMatt Arsenault /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
94161f1f2a2SMatt Arsenault /// CC.
94261f1f2a2SMatt Arsenault static std::pair<CCAssignFn *, CCAssignFn *>
94361f1f2a2SMatt Arsenault getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
94461f1f2a2SMatt Arsenault return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
94561f1f2a2SMatt Arsenault }
94661f1f2a2SMatt Arsenault
94761f1f2a2SMatt Arsenault static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
94861f1f2a2SMatt Arsenault bool IsTailCall) {
949fd1cfc90SSebastian Neubauer assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, "
950fd1cfc90SSebastian Neubauer "because the address can be divergent");
951fd1cfc90SSebastian Neubauer return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::G_SI_CALL;
95261f1f2a2SMatt Arsenault }
95361f1f2a2SMatt Arsenault
95461f1f2a2SMatt Arsenault // Add operands to the call instruction to track the callee.
95561f1f2a2SMatt Arsenault static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
95661f1f2a2SMatt Arsenault MachineIRBuilder &MIRBuilder,
95761f1f2a2SMatt Arsenault AMDGPUCallLowering::CallLoweringInfo &Info) {
95861f1f2a2SMatt Arsenault if (Info.Callee.isReg()) {
9591fd1beeaSMatt Arsenault CallInst.addReg(Info.Callee.getReg());
96061f1f2a2SMatt Arsenault CallInst.addImm(0);
96161f1f2a2SMatt Arsenault } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
96261f1f2a2SMatt Arsenault     // The call lowering naively assumed we can directly encode a call target in
96361f1f2a2SMatt Arsenault // the instruction, which is not the case. Materialize the address here.
96461f1f2a2SMatt Arsenault const GlobalValue *GV = Info.Callee.getGlobal();
96561f1f2a2SMatt Arsenault auto Ptr = MIRBuilder.buildGlobalValue(
96661f1f2a2SMatt Arsenault LLT::pointer(GV->getAddressSpace(), 64), GV);
96761f1f2a2SMatt Arsenault CallInst.addReg(Ptr.getReg(0));
96861f1f2a2SMatt Arsenault CallInst.add(Info.Callee);
96961f1f2a2SMatt Arsenault } else
97061f1f2a2SMatt Arsenault return false;
97161f1f2a2SMatt Arsenault
97261f1f2a2SMatt Arsenault return true;
97361f1f2a2SMatt Arsenault }
97461f1f2a2SMatt Arsenault
9756a70874dSMatt Arsenault bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
9766a70874dSMatt Arsenault CallLoweringInfo &Info, MachineFunction &MF,
9776a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &InArgs) const {
9786a70874dSMatt Arsenault const Function &CallerF = MF.getFunction();
9796a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv;
9806a70874dSMatt Arsenault CallingConv::ID CallerCC = CallerF.getCallingConv();
9816a70874dSMatt Arsenault
9826a70874dSMatt Arsenault // If the calling conventions match, then everything must be the same.
9836a70874dSMatt Arsenault if (CalleeCC == CallerCC)
9846a70874dSMatt Arsenault return true;
9856a70874dSMatt Arsenault
9866a70874dSMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
9876a70874dSMatt Arsenault
9886a70874dSMatt Arsenault // Make sure that the caller and callee preserve all of the same registers.
9896a70874dSMatt Arsenault auto TRI = ST.getRegisterInfo();
9906a70874dSMatt Arsenault
9916a70874dSMatt Arsenault const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9926a70874dSMatt Arsenault const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9936a70874dSMatt Arsenault if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9946a70874dSMatt Arsenault return false;
9956a70874dSMatt Arsenault
9966a70874dSMatt Arsenault // Check if the caller and callee will handle arguments in the same way.
9976a70874dSMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>();
9986a70874dSMatt Arsenault CCAssignFn *CalleeAssignFnFixed;
9996a70874dSMatt Arsenault CCAssignFn *CalleeAssignFnVarArg;
10006a70874dSMatt Arsenault std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
10016a70874dSMatt Arsenault getAssignFnsForCC(CalleeCC, TLI);
10026a70874dSMatt Arsenault
10036a70874dSMatt Arsenault CCAssignFn *CallerAssignFnFixed;
10046a70874dSMatt Arsenault CCAssignFn *CallerAssignFnVarArg;
10056a70874dSMatt Arsenault std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
10066a70874dSMatt Arsenault getAssignFnsForCC(CallerCC, TLI);
10076a70874dSMatt Arsenault
10086a70874dSMatt Arsenault // FIXME: We are not accounting for potential differences in implicitly passed
10096a70874dSMatt Arsenault // inputs, but only the fixed ABI is supported now anyway.
10106a70874dSMatt Arsenault IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
10116a70874dSMatt Arsenault CalleeAssignFnVarArg);
10126a70874dSMatt Arsenault IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
10136a70874dSMatt Arsenault CallerAssignFnVarArg);
10146a70874dSMatt Arsenault return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
10156a70874dSMatt Arsenault }
10166a70874dSMatt Arsenault
10176a70874dSMatt Arsenault bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
10186a70874dSMatt Arsenault CallLoweringInfo &Info, MachineFunction &MF,
10196a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &OutArgs) const {
10206a70874dSMatt Arsenault // If there are no outgoing arguments, then we are done.
10216a70874dSMatt Arsenault if (OutArgs.empty())
10226a70874dSMatt Arsenault return true;
10236a70874dSMatt Arsenault
10246a70874dSMatt Arsenault const Function &CallerF = MF.getFunction();
10256a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv;
10266a70874dSMatt Arsenault CallingConv::ID CallerCC = CallerF.getCallingConv();
10276a70874dSMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>();
10286a70874dSMatt Arsenault
10296a70874dSMatt Arsenault CCAssignFn *AssignFnFixed;
10306a70874dSMatt Arsenault CCAssignFn *AssignFnVarArg;
10316a70874dSMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
10326a70874dSMatt Arsenault
10336a70874dSMatt Arsenault // We have outgoing arguments. Make sure that we can tail call with them.
10346a70874dSMatt Arsenault SmallVector<CCValAssign, 16> OutLocs;
10356a70874dSMatt Arsenault CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
10366a70874dSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
10376a70874dSMatt Arsenault
10386a70874dSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
10396a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
10406a70874dSMatt Arsenault return false;
10416a70874dSMatt Arsenault }
10426a70874dSMatt Arsenault
10436a70874dSMatt Arsenault // Make sure that they can fit on the caller's stack.
10446a70874dSMatt Arsenault const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
10456a70874dSMatt Arsenault if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
10466a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
10476a70874dSMatt Arsenault return false;
10486a70874dSMatt Arsenault }
10496a70874dSMatt Arsenault
10506a70874dSMatt Arsenault // Verify that the parameters in callee-saved registers match.
10516a70874dSMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
10526a70874dSMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo();
10536a70874dSMatt Arsenault const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
10546a70874dSMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo();
10556a70874dSMatt Arsenault return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
10566a70874dSMatt Arsenault }
10576a70874dSMatt Arsenault
10586a70874dSMatt Arsenault /// Return true if the calling convention is one that we can guarantee TCO for.
10596a70874dSMatt Arsenault static bool canGuaranteeTCO(CallingConv::ID CC) {
10606a70874dSMatt Arsenault return CC == CallingConv::Fast;
10616a70874dSMatt Arsenault }
10626a70874dSMatt Arsenault
10636a70874dSMatt Arsenault /// Return true if we might ever do TCO for calls with this calling convention.
10646a70874dSMatt Arsenault static bool mayTailCallThisCC(CallingConv::ID CC) {
10656a70874dSMatt Arsenault switch (CC) {
10666a70874dSMatt Arsenault case CallingConv::C:
10676a70874dSMatt Arsenault case CallingConv::AMDGPU_Gfx:
10686a70874dSMatt Arsenault return true;
10696a70874dSMatt Arsenault default:
10706a70874dSMatt Arsenault return canGuaranteeTCO(CC);
10716a70874dSMatt Arsenault }
10726a70874dSMatt Arsenault }
10736a70874dSMatt Arsenault
10746a70874dSMatt Arsenault bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
10756a70874dSMatt Arsenault MachineIRBuilder &B, CallLoweringInfo &Info,
10766a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
10776a70874dSMatt Arsenault // Must pass all target-independent checks in order to tail call optimize.
10786a70874dSMatt Arsenault if (!Info.IsTailCall)
10796a70874dSMatt Arsenault return false;
10806a70874dSMatt Arsenault
1081fd1cfc90SSebastian Neubauer // Indirect calls can't be tail calls, because the address can be divergent.
1082fd1cfc90SSebastian Neubauer   // TODO: Use divergence analysis to allow tail calls when the target is known to be uniform.
1083fd1cfc90SSebastian Neubauer if (Info.Callee.isReg())
1084fd1cfc90SSebastian Neubauer return false;
1085fd1cfc90SSebastian Neubauer
10866a70874dSMatt Arsenault MachineFunction &MF = B.getMF();
10876a70874dSMatt Arsenault const Function &CallerF = MF.getFunction();
10886a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv;
10896a70874dSMatt Arsenault CallingConv::ID CallerCC = CallerF.getCallingConv();
10906a70874dSMatt Arsenault
10916a70874dSMatt Arsenault const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
10926a70874dSMatt Arsenault const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10936a70874dSMatt Arsenault   // Kernels aren't callable and don't have a live-in return address, so it
10946a70874dSMatt Arsenault   // doesn't make sense to do a tail call from an entry function.
10956a70874dSMatt Arsenault if (!CallerPreserved)
10966a70874dSMatt Arsenault return false;
10976a70874dSMatt Arsenault
10986a70874dSMatt Arsenault if (!mayTailCallThisCC(CalleeCC)) {
10996a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
11006a70874dSMatt Arsenault return false;
11016a70874dSMatt Arsenault }
11026a70874dSMatt Arsenault
11036a70874dSMatt Arsenault if (any_of(CallerF.args(), [](const Argument &A) {
11046a70874dSMatt Arsenault return A.hasByValAttr() || A.hasSwiftErrorAttr();
11056a70874dSMatt Arsenault })) {
11066a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
11076a70874dSMatt Arsenault "or swifterror arguments\n");
11086a70874dSMatt Arsenault return false;
11096a70874dSMatt Arsenault }
11106a70874dSMatt Arsenault
11116a70874dSMatt Arsenault   // With -tailcallopt, TCO is allowed only for a guaranteed-TCO convention shared by caller and callee.
11126a70874dSMatt Arsenault if (MF.getTarget().Options.GuaranteedTailCallOpt)
11136a70874dSMatt Arsenault return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();
11146a70874dSMatt Arsenault
11156a70874dSMatt Arsenault // Verify that the incoming and outgoing arguments from the callee are
11166a70874dSMatt Arsenault // safe to tail call.
11176a70874dSMatt Arsenault if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
11186a70874dSMatt Arsenault LLVM_DEBUG(
11196a70874dSMatt Arsenault dbgs()
11206a70874dSMatt Arsenault << "... Caller and callee have incompatible calling conventions.\n");
11216a70874dSMatt Arsenault return false;
11226a70874dSMatt Arsenault }
11236a70874dSMatt Arsenault
11246a70874dSMatt Arsenault if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
11256a70874dSMatt Arsenault return false;
11266a70874dSMatt Arsenault
11276a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
11286a70874dSMatt Arsenault return true;
11296a70874dSMatt Arsenault }
11306a70874dSMatt Arsenault
11316a70874dSMatt Arsenault // Insert outgoing implicit arguments for a call, by inserting copies to the
11326a70874dSMatt Arsenault // implicit argument registers and adding the necessary implicit uses to the
11336a70874dSMatt Arsenault // call instruction.
11346a70874dSMatt Arsenault void AMDGPUCallLowering::handleImplicitCallArguments(
11356a70874dSMatt Arsenault MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
11366a70874dSMatt Arsenault const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
11376a70874dSMatt Arsenault ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
11386a70874dSMatt Arsenault if (!ST.enableFlatScratch()) {
11396a70874dSMatt Arsenault     // Insert copies for the scratch resource descriptor (SRD). In the HSA
11406a70874dSMatt Arsenault     // case, this should be an identity copy.
1141d5e14ba8SSander de Smalen auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
1142d5e14ba8SSander de Smalen FuncInfo.getScratchRSrcReg());
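                            // The ABI places the scratch resource descriptor in SGPR0-SGPR3 for
                            // calls, so copy it there and record the implicit use on the call.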
11436a70874dSMatt Arsenault MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
11446a70874dSMatt Arsenault CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
11456a70874dSMatt Arsenault }
11466a70874dSMatt Arsenault
11476a70874dSMatt Arsenault for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
11486a70874dSMatt Arsenault MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
11496a70874dSMatt Arsenault CallInst.addReg(ArgReg.first, RegState::Implicit);
11506a70874dSMatt Arsenault }
11516a70874dSMatt Arsenault }
11526a70874dSMatt Arsenault
11536a70874dSMatt Arsenault bool AMDGPUCallLowering::lowerTailCall(
11546a70874dSMatt Arsenault MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
11556a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &OutArgs) const {
11566a70874dSMatt Arsenault MachineFunction &MF = MIRBuilder.getMF();
11576a70874dSMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
11586a70874dSMatt Arsenault SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
11596a70874dSMatt Arsenault const Function &F = MF.getFunction();
11606a70874dSMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo();
11616a70874dSMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>();
11626a70874dSMatt Arsenault
11636a70874dSMatt Arsenault // True when we're tail calling, but without -tailcallopt.
11646a70874dSMatt Arsenault bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;
11656a70874dSMatt Arsenault
11666a70874dSMatt Arsenault // Find out which ABI gets to decide where things go.
11676a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv;
11686a70874dSMatt Arsenault CCAssignFn *AssignFnFixed;
11696a70874dSMatt Arsenault CCAssignFn *AssignFnVarArg;
11706a70874dSMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
11716a70874dSMatt Arsenault
11726a70874dSMatt Arsenault MachineInstrBuilder CallSeqStart;
11736a70874dSMatt Arsenault if (!IsSibCall)
11746a70874dSMatt Arsenault CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);
11756a70874dSMatt Arsenault
11766a70874dSMatt Arsenault unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
11776a70874dSMatt Arsenault auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
11786a70874dSMatt Arsenault if (!addCallTargetOperands(MIB, MIRBuilder, Info))
11796a70874dSMatt Arsenault return false;
11806a70874dSMatt Arsenault
11816a70874dSMatt Arsenault // Byte offset for the tail call. When we are sibcalling, this will always
11826a70874dSMatt Arsenault // be 0.
11836a70874dSMatt Arsenault MIB.addImm(0);
11846a70874dSMatt Arsenault
11856a70874dSMatt Arsenault // Tell the call which registers are clobbered.
11866a70874dSMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo();
11876a70874dSMatt Arsenault const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
11886a70874dSMatt Arsenault MIB.addRegMask(Mask);
11896a70874dSMatt Arsenault
11906a70874dSMatt Arsenault // FPDiff is the byte offset of the call's argument area from the callee's.
11916a70874dSMatt Arsenault // Stores to callee stack arguments will be placed in FixedStackSlots offset
11926a70874dSMatt Arsenault // by this amount for a tail call. In a sibling call it must be 0 because the
11936a70874dSMatt Arsenault // caller will deallocate the entire stack and the callee still expects its
11946a70874dSMatt Arsenault // arguments to begin at SP+0.
11956a70874dSMatt Arsenault int FPDiff = 0;
11966a70874dSMatt Arsenault
11976a70874dSMatt Arsenault // This will be 0 for sibcalls, potentially nonzero for tail calls produced
11986a70874dSMatt Arsenault // by -tailcallopt. For sibcalls, the memory operands for the call are
11996a70874dSMatt Arsenault // already available in the caller's incoming argument space.
12006a70874dSMatt Arsenault unsigned NumBytes = 0;
12016a70874dSMatt Arsenault if (!IsSibCall) {
12026a70874dSMatt Arsenault // We aren't sibcalling, so we need to compute FPDiff. We need to do this
12036a70874dSMatt Arsenault // before handling assignments, because FPDiff must be known for memory
12046a70874dSMatt Arsenault // arguments.
12056a70874dSMatt Arsenault unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
12066a70874dSMatt Arsenault SmallVector<CCValAssign, 16> OutLocs;
12076a70874dSMatt Arsenault CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
12086a70874dSMatt Arsenault
12096a70874dSMatt Arsenault // FIXME: Not accounting for callee implicit inputs
12106a70874dSMatt Arsenault OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
12116a70874dSMatt Arsenault if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
12126a70874dSMatt Arsenault return false;
12136a70874dSMatt Arsenault
12146a70874dSMatt Arsenault     // Since this is a tail call, the callee will pop the argument stack, so we
12156a70874dSMatt Arsenault     // must keep the stack 16-byte aligned.
12166a70874dSMatt Arsenault NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());
12176a70874dSMatt Arsenault
12186a70874dSMatt Arsenault // FPDiff will be negative if this tail call requires more space than we
12196a70874dSMatt Arsenault // would automatically have in our incoming argument space. Positive if we
12206a70874dSMatt Arsenault // actually shrink the stack.
12216a70874dSMatt Arsenault FPDiff = NumReusableBytes - NumBytes;
12226a70874dSMatt Arsenault
12236a70874dSMatt Arsenault // The stack pointer must be 16-byte aligned at all times it's used for a
12246a70874dSMatt Arsenault // memory operation, which in practice means at *all* times and in
12256a70874dSMatt Arsenault // particular across call boundaries. Therefore our own arguments started at
12266a70874dSMatt Arsenault // a 16-byte aligned SP and the delta applied for the tail call should
12276a70874dSMatt Arsenault // satisfy the same constraint.
122885394d9eSMatt Arsenault assert(isAligned(ST.getStackAlignment(), FPDiff) &&
122985394d9eSMatt Arsenault "unaligned stack on tail call");
12306a70874dSMatt Arsenault }
12316a70874dSMatt Arsenault
12326a70874dSMatt Arsenault SmallVector<CCValAssign, 16> ArgLocs;
12336a70874dSMatt Arsenault CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
12346a70874dSMatt Arsenault
12356a70874dSMatt Arsenault // We could pass MIB and directly add the implicit uses to the call
12366a70874dSMatt Arsenault // now. However, as an aesthetic choice, place implicit argument operands
12376a70874dSMatt Arsenault // after the ordinary user argument registers.
12386a70874dSMatt Arsenault SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
12396a70874dSMatt Arsenault
124006b90175SMatt Arsenault if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
12416a70874dSMatt Arsenault // With a fixed ABI, allocate fixed registers before user arguments.
12426a70874dSMatt Arsenault if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
12436a70874dSMatt Arsenault return false;
12446a70874dSMatt Arsenault }
12456a70874dSMatt Arsenault
12466a70874dSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
12476a70874dSMatt Arsenault
12486a70874dSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, CCInfo))
12496a70874dSMatt Arsenault return false;
12506a70874dSMatt Arsenault
12516a70874dSMatt Arsenault // Do the actual argument marshalling.
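                             // The trailing 'true' and FPDiff are presumably the handler's tail-call
                             // flag and byte offset for stack-passed arguments.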
12526a70874dSMatt Arsenault AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
12536a70874dSMatt Arsenault if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
12546a70874dSMatt Arsenault return false;
12556a70874dSMatt Arsenault
12566a70874dSMatt Arsenault handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);
12576a70874dSMatt Arsenault
12586a70874dSMatt Arsenault // If we have -tailcallopt, we need to adjust the stack. We'll do the call
12596a70874dSMatt Arsenault // sequence start and end here.
12606a70874dSMatt Arsenault if (!IsSibCall) {
12616a70874dSMatt Arsenault MIB->getOperand(1).setImm(FPDiff);
12626a70874dSMatt Arsenault CallSeqStart.addImm(NumBytes).addImm(0);
12636a70874dSMatt Arsenault // End the call sequence *before* emitting the call. Normally, we would
12646a70874dSMatt Arsenault // tidy the frame up after the call. However, here, we've laid out the
12656a70874dSMatt Arsenault // parameters so that when SP is reset, they will be in the correct
12666a70874dSMatt Arsenault // location.
12676a70874dSMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
12686a70874dSMatt Arsenault }
12696a70874dSMatt Arsenault
12706a70874dSMatt Arsenault // Now we can add the actual call instruction to the correct basic block.
12716a70874dSMatt Arsenault MIRBuilder.insertInstr(MIB);
12726a70874dSMatt Arsenault
12736a70874dSMatt Arsenault   // If the callee is a register, it is used by a target-specific instruction
12746a70874dSMatt Arsenault   // and so must have a register class matching the constraint of that
12756a70874dSMatt Arsenault   // instruction.
12766a70874dSMatt Arsenault
12776a70874dSMatt Arsenault // FIXME: We should define regbankselectable call instructions to handle
12786a70874dSMatt Arsenault // divergent call targets.
12796a70874dSMatt Arsenault if (MIB->getOperand(0).isReg()) {
12806a70874dSMatt Arsenault MIB->getOperand(0).setReg(constrainOperandRegClass(
12816a70874dSMatt Arsenault MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
12826a70874dSMatt Arsenault MIB->getDesc(), MIB->getOperand(0), 0));
12836a70874dSMatt Arsenault }
12846a70874dSMatt Arsenault
12856a70874dSMatt Arsenault MF.getFrameInfo().setHasTailCall();
12866a70874dSMatt Arsenault Info.LoweredTailCall = true;
12876a70874dSMatt Arsenault return true;
12886a70874dSMatt Arsenault }
12896a70874dSMatt Arsenault
129061f1f2a2SMatt Arsenault bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
129161f1f2a2SMatt Arsenault CallLoweringInfo &Info) const {
129261f1f2a2SMatt Arsenault if (Info.IsVarArg) {
129361f1f2a2SMatt Arsenault LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
129461f1f2a2SMatt Arsenault return false;
129561f1f2a2SMatt Arsenault }
129661f1f2a2SMatt Arsenault
129761f1f2a2SMatt Arsenault MachineFunction &MF = MIRBuilder.getMF();
129861f1f2a2SMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
129961f1f2a2SMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo();
130061f1f2a2SMatt Arsenault
130161f1f2a2SMatt Arsenault const Function &F = MF.getFunction();
130261f1f2a2SMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo();
130361f1f2a2SMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>();
130461f1f2a2SMatt Arsenault const DataLayout &DL = F.getParent()->getDataLayout();
130561f1f2a2SMatt Arsenault
130661f1f2a2SMatt Arsenault SmallVector<ArgInfo, 8> OutArgs;
1307fd82cbcfSMatt Arsenault for (auto &OrigArg : Info.OrigArgs)
1308fd82cbcfSMatt Arsenault splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
130961f1f2a2SMatt Arsenault
13103231d2b5SMatt Arsenault SmallVector<ArgInfo, 8> InArgs;
13113231d2b5SMatt Arsenault if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
13123231d2b5SMatt Arsenault splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
13133231d2b5SMatt Arsenault
131461f1f2a2SMatt Arsenault // If we can lower as a tail call, do that instead.
13156a70874dSMatt Arsenault bool CanTailCallOpt =
13166a70874dSMatt Arsenault isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);
131761f1f2a2SMatt Arsenault
131861f1f2a2SMatt Arsenault // We must emit a tail call if we have musttail.
131961f1f2a2SMatt Arsenault if (Info.IsMustTailCall && !CanTailCallOpt) {
132061f1f2a2SMatt Arsenault LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
132161f1f2a2SMatt Arsenault return false;
132261f1f2a2SMatt Arsenault }
132361f1f2a2SMatt Arsenault
132499e8e173SMatt Arsenault Info.IsTailCall = CanTailCallOpt;
13256a70874dSMatt Arsenault if (CanTailCallOpt)
13266a70874dSMatt Arsenault return lowerTailCall(MIRBuilder, Info, OutArgs);
13276a70874dSMatt Arsenault
132861f1f2a2SMatt Arsenault // Find out which ABI gets to decide where things go.
132961f1f2a2SMatt Arsenault CCAssignFn *AssignFnFixed;
133061f1f2a2SMatt Arsenault CCAssignFn *AssignFnVarArg;
133161f1f2a2SMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) =
133261f1f2a2SMatt Arsenault getAssignFnsForCC(Info.CallConv, TLI);
133361f1f2a2SMatt Arsenault
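                           // Begin the call sequence. The callee-popped byte count is supplied on
                           // the matching ADJCALLSTACKDOWN below.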
133461f1f2a2SMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
133561f1f2a2SMatt Arsenault .addImm(0)
133661f1f2a2SMatt Arsenault .addImm(0);
133761f1f2a2SMatt Arsenault
133861f1f2a2SMatt Arsenault // Create a temporarily-floating call instruction so we can add the implicit
133961f1f2a2SMatt Arsenault // uses of arg registers.
134061f1f2a2SMatt Arsenault unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
134161f1f2a2SMatt Arsenault
134261f1f2a2SMatt Arsenault auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
134361f1f2a2SMatt Arsenault MIB.addDef(TRI->getReturnAddressReg(MF));
134461f1f2a2SMatt Arsenault
134561f1f2a2SMatt Arsenault if (!addCallTargetOperands(MIB, MIRBuilder, Info))
134661f1f2a2SMatt Arsenault return false;
134761f1f2a2SMatt Arsenault
134861f1f2a2SMatt Arsenault // Tell the call which registers are clobbered.
134961f1f2a2SMatt Arsenault const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
135061f1f2a2SMatt Arsenault MIB.addRegMask(Mask);
135161f1f2a2SMatt Arsenault
135261f1f2a2SMatt Arsenault SmallVector<CCValAssign, 16> ArgLocs;
135361f1f2a2SMatt Arsenault CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());
135461f1f2a2SMatt Arsenault
135561f1f2a2SMatt Arsenault // We could pass MIB and directly add the implicit uses to the call
135661f1f2a2SMatt Arsenault // now. However, as an aesthetic choice, place implicit argument operands
135761f1f2a2SMatt Arsenault // after the ordinary user argument registers.
135861f1f2a2SMatt Arsenault SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;
135961f1f2a2SMatt Arsenault
136006b90175SMatt Arsenault if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
136161f1f2a2SMatt Arsenault // With a fixed ABI, allocate fixed registers before user arguments.
136261f1f2a2SMatt Arsenault if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
136361f1f2a2SMatt Arsenault return false;
136461f1f2a2SMatt Arsenault }
136561f1f2a2SMatt Arsenault
136661f1f2a2SMatt Arsenault // Do the actual argument marshalling.
136761f1f2a2SMatt Arsenault SmallVector<Register, 8> PhysRegs;
136824e2e5dfSMatt Arsenault
136924e2e5dfSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
137024e2e5dfSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, CCInfo))
137124e2e5dfSMatt Arsenault return false;
137224e2e5dfSMatt Arsenault
137324e2e5dfSMatt Arsenault AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
137424e2e5dfSMatt Arsenault if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
137561f1f2a2SMatt Arsenault return false;
137661f1f2a2SMatt Arsenault
137761f1f2a2SMatt Arsenault const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
137861f1f2a2SMatt Arsenault
13796a70874dSMatt Arsenault handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);
138061f1f2a2SMatt Arsenault
138161f1f2a2SMatt Arsenault // Get a count of how many bytes are to be pushed on the stack.
138261f1f2a2SMatt Arsenault unsigned NumBytes = CCInfo.getNextStackOffset();
138361f1f2a2SMatt Arsenault
138461f1f2a2SMatt Arsenault   // If the callee is a register, it is used by a target-specific instruction
138561f1f2a2SMatt Arsenault   // and so must have a register class matching the constraint of that
138661f1f2a2SMatt Arsenault   // instruction.
138761f1f2a2SMatt Arsenault
138861f1f2a2SMatt Arsenault // FIXME: We should define regbankselectable call instructions to handle
138961f1f2a2SMatt Arsenault // divergent call targets.
139061f1f2a2SMatt Arsenault if (MIB->getOperand(1).isReg()) {
139161f1f2a2SMatt Arsenault MIB->getOperand(1).setReg(constrainOperandRegClass(
139261f1f2a2SMatt Arsenault MF, *TRI, MRI, *ST.getInstrInfo(),
139361f1f2a2SMatt Arsenault *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
139461f1f2a2SMatt Arsenault 1));
139561f1f2a2SMatt Arsenault }
139661f1f2a2SMatt Arsenault
1397d2b8fcffSMatt Arsenault // Now we can add the actual call instruction to the correct position.
1398d2b8fcffSMatt Arsenault MIRBuilder.insertInstr(MIB);
1399d2b8fcffSMatt Arsenault
140061f1f2a2SMatt Arsenault   // Finally we can copy the returned value back into its virtual register. In
140161f1f2a2SMatt Arsenault // symmetry with the arguments, the physical register must be an
140261f1f2a2SMatt Arsenault // implicit-define of the call instruction.
1403ae25a397SChristudasan Devadasan if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
140461f1f2a2SMatt Arsenault CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
140561f1f2a2SMatt Arsenault Info.IsVarArg);
14065efc3bfdSMatt Arsenault IncomingValueAssigner Assigner(RetAssignFn);
140724e2e5dfSMatt Arsenault CallReturnHandler Handler(MIRBuilder, MRI, MIB);
140824e2e5dfSMatt Arsenault if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
140924e2e5dfSMatt Arsenault Info.CallConv, Info.IsVarArg))
141061f1f2a2SMatt Arsenault return false;
141161f1f2a2SMatt Arsenault }
141261f1f2a2SMatt Arsenault
141361f1f2a2SMatt Arsenault uint64_t CalleePopBytes = NumBytes;
14143231d2b5SMatt Arsenault
14153231d2b5SMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
14163231d2b5SMatt Arsenault .addImm(0)
141761f1f2a2SMatt Arsenault .addImm(CalleePopBytes);
141861f1f2a2SMatt Arsenault
14193231d2b5SMatt Arsenault if (!Info.CanLowerReturn) {
14203231d2b5SMatt Arsenault insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
14213231d2b5SMatt Arsenault Info.DemoteRegister, Info.DemoteStackIndex);
14223231d2b5SMatt Arsenault }
14233231d2b5SMatt Arsenault
142461f1f2a2SMatt Arsenault return true;
142561f1f2a2SMatt Arsenault }
1426