//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the value
    // ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg()).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

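    // Non-tail calls address the outgoing argument area relative to the
    // cached stack pointer copy made above.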
    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                       CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

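  // Graphics shaders return to a separately inserted epilog; ordinary
  // functions return by restoring the saved return address into the PC.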
  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None;
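    // A byref argument may carry an explicit alignment; otherwise fall back
    // to the ABI alignment of the (pointee) type.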
    if (!ABIAlign)
      ABIAlign = DL.getABITypeAlign(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackOffset = Assigner.StackOffset;

  if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should keep
  // track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackOffset);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
                                           CCState &CCInfo,
                                           SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
                                           CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR and
  // doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo
      = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
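  // Implicit inputs that may have to be forwarded to the callee. Each entry
  // corresponds positionally to an attribute in ImplicitAttrNames below.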
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
  };

  static constexpr StringLiteral ImplicitAttrNames[] = {
    "amdgpu-no-dispatch-ptr",
    "amdgpu-no-queue-ptr",
    "amdgpu-no-implicitarg-ptr",
    "amdgpu-no-dispatch-id",
    "amdgpu-no-workgroup-id-x",
    "amdgpu-no-workgroup-id-y",
    "amdgpu-no-workgroup-id-z"
  };

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI
      = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else {
      assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register or pass it as is if already
  // packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                       std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg && (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);

    // Workitem ids are already packed; any of the present incoming arguments
    // will carry all required fields.
    ArgDescriptor IncomingArg = ArgDescriptor::createArg(
        IncomingArgX ? *IncomingArgX :
        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
    LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                       &AMDGPU::VGPR_32RegClass, S32);
  }

  if (OutgoingArg->isRegister()) {
    if (InputReg)
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::SI_CALL;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering lightly assumed we can directly encode a call target in
    // the instruction, which is not the case. Materialize the address here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}

bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  // Check if the caller and callee will handle arguments in the same way.
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  // FIXME: We are not accounting for potential differences in implicitly passed
  // inputs, but only the fixed ABI is supported now anyway.
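  // Check that the call's return values would be assigned the same way under
  // both calling conventions.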
9946a70874dSMatt Arsenault IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed, 9956a70874dSMatt Arsenault CalleeAssignFnVarArg); 9966a70874dSMatt Arsenault IncomingValueAssigner CallerAssigner(CallerAssignFnFixed, 9976a70874dSMatt Arsenault CallerAssignFnVarArg); 9986a70874dSMatt Arsenault return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner); 9996a70874dSMatt Arsenault } 10006a70874dSMatt Arsenault 10016a70874dSMatt Arsenault bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable( 10026a70874dSMatt Arsenault CallLoweringInfo &Info, MachineFunction &MF, 10036a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &OutArgs) const { 10046a70874dSMatt Arsenault // If there are no outgoing arguments, then we are done. 10056a70874dSMatt Arsenault if (OutArgs.empty()) 10066a70874dSMatt Arsenault return true; 10076a70874dSMatt Arsenault 10086a70874dSMatt Arsenault const Function &CallerF = MF.getFunction(); 10096a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv; 10106a70874dSMatt Arsenault CallingConv::ID CallerCC = CallerF.getCallingConv(); 10116a70874dSMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>(); 10126a70874dSMatt Arsenault 10136a70874dSMatt Arsenault CCAssignFn *AssignFnFixed; 10146a70874dSMatt Arsenault CCAssignFn *AssignFnVarArg; 10156a70874dSMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); 10166a70874dSMatt Arsenault 10176a70874dSMatt Arsenault // We have outgoing arguments. Make sure that we can tail call with them. 10186a70874dSMatt Arsenault SmallVector<CCValAssign, 16> OutLocs; 10196a70874dSMatt Arsenault CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext()); 10206a70874dSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); 10216a70874dSMatt Arsenault 10226a70874dSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, OutInfo)) { 10236a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n"); 10246a70874dSMatt Arsenault return false; 10256a70874dSMatt Arsenault } 10266a70874dSMatt Arsenault 10276a70874dSMatt Arsenault // Make sure that they can fit on the caller's stack. 10286a70874dSMatt Arsenault const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 10296a70874dSMatt Arsenault if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) { 10306a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n"); 10316a70874dSMatt Arsenault return false; 10326a70874dSMatt Arsenault } 10336a70874dSMatt Arsenault 10346a70874dSMatt Arsenault // Verify that the parameters in callee-saved registers match. 10356a70874dSMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 10366a70874dSMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo(); 10376a70874dSMatt Arsenault const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC); 10386a70874dSMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo(); 10396a70874dSMatt Arsenault return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs); 10406a70874dSMatt Arsenault } 10416a70874dSMatt Arsenault 10426a70874dSMatt Arsenault /// Return true if the calling convention is one that we can guarantee TCO for. 
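/// Currently this is only CallingConv::Fast; mayTailCallThisCC below widens
/// the opportunistic (sibcall) set to also cover C and AMDGPU_Gfx.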
10436a70874dSMatt Arsenault static bool canGuaranteeTCO(CallingConv::ID CC) {
10446a70874dSMatt Arsenault return CC == CallingConv::Fast;
10456a70874dSMatt Arsenault }
10466a70874dSMatt Arsenault
10476a70874dSMatt Arsenault /// Return true if we might ever do TCO for calls with this calling convention.
10486a70874dSMatt Arsenault static bool mayTailCallThisCC(CallingConv::ID CC) {
10496a70874dSMatt Arsenault switch (CC) {
10506a70874dSMatt Arsenault case CallingConv::C:
10516a70874dSMatt Arsenault case CallingConv::AMDGPU_Gfx:
10526a70874dSMatt Arsenault return true;
10536a70874dSMatt Arsenault default:
10546a70874dSMatt Arsenault return canGuaranteeTCO(CC);
10556a70874dSMatt Arsenault }
10566a70874dSMatt Arsenault }
10576a70874dSMatt Arsenault
10586a70874dSMatt Arsenault bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
10596a70874dSMatt Arsenault MachineIRBuilder &B, CallLoweringInfo &Info,
10606a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
10616a70874dSMatt Arsenault // Must pass all target-independent checks in order to tail call optimize.
10626a70874dSMatt Arsenault if (!Info.IsTailCall)
10636a70874dSMatt Arsenault return false;
10646a70874dSMatt Arsenault
10656a70874dSMatt Arsenault MachineFunction &MF = B.getMF();
10666a70874dSMatt Arsenault const Function &CallerF = MF.getFunction();
10676a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv;
10686a70874dSMatt Arsenault CallingConv::ID CallerCC = CallerF.getCallingConv();
10696a70874dSMatt Arsenault
10706a70874dSMatt Arsenault const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
10716a70874dSMatt Arsenault const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10726a70874dSMatt Arsenault // Kernels aren't callable and don't have a live-in return address, so it
10736a70874dSMatt Arsenault // doesn't make sense to do a tail call with entry functions.
10746a70874dSMatt Arsenault if (!CallerPreserved)
10756a70874dSMatt Arsenault return false;
10766a70874dSMatt Arsenault
10776a70874dSMatt Arsenault if (!mayTailCallThisCC(CalleeCC)) {
10786a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
10796a70874dSMatt Arsenault return false;
10806a70874dSMatt Arsenault }
10816a70874dSMatt Arsenault
10826a70874dSMatt Arsenault if (any_of(CallerF.args(), [](const Argument &A) {
10836a70874dSMatt Arsenault return A.hasByValAttr() || A.hasSwiftErrorAttr();
10846a70874dSMatt Arsenault })) {
10856a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
10866a70874dSMatt Arsenault "or swifterror arguments\n");
10876a70874dSMatt Arsenault return false;
10886a70874dSMatt Arsenault }
10896a70874dSMatt Arsenault
10906a70874dSMatt Arsenault // If we have -tailcallopt, then we're done.
10916a70874dSMatt Arsenault if (MF.getTarget().Options.GuaranteedTailCallOpt)
10926a70874dSMatt Arsenault return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();
10936a70874dSMatt Arsenault
10946a70874dSMatt Arsenault // Verify that the incoming and outgoing arguments from the callee are
10956a70874dSMatt Arsenault // safe to tail call.
10966a70874dSMatt Arsenault if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
10976a70874dSMatt Arsenault LLVM_DEBUG(
10986a70874dSMatt Arsenault dbgs()
10996a70874dSMatt Arsenault << "... 
Caller and callee have incompatible calling conventions.\n"); 11006a70874dSMatt Arsenault return false; 11016a70874dSMatt Arsenault } 11026a70874dSMatt Arsenault 11036a70874dSMatt Arsenault if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs)) 11046a70874dSMatt Arsenault return false; 11056a70874dSMatt Arsenault 11066a70874dSMatt Arsenault LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n"); 11076a70874dSMatt Arsenault return true; 11086a70874dSMatt Arsenault } 11096a70874dSMatt Arsenault 11106a70874dSMatt Arsenault // Insert outgoing implicit arguments for a call, by inserting copies to the 11116a70874dSMatt Arsenault // implicit argument registers and adding the necessary implicit uses to the 11126a70874dSMatt Arsenault // call instruction. 11136a70874dSMatt Arsenault void AMDGPUCallLowering::handleImplicitCallArguments( 11146a70874dSMatt Arsenault MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst, 11156a70874dSMatt Arsenault const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo, 11166a70874dSMatt Arsenault ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const { 11176a70874dSMatt Arsenault if (!ST.enableFlatScratch()) { 11186a70874dSMatt Arsenault // Insert copies for the SRD. In the HSA case, this should be an identity 11196a70874dSMatt Arsenault // copy. 1120d5e14ba8SSander de Smalen auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32), 1121d5e14ba8SSander de Smalen FuncInfo.getScratchRSrcReg()); 11226a70874dSMatt Arsenault MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg); 11236a70874dSMatt Arsenault CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit); 11246a70874dSMatt Arsenault } 11256a70874dSMatt Arsenault 11266a70874dSMatt Arsenault for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) { 11276a70874dSMatt Arsenault MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second); 11286a70874dSMatt Arsenault CallInst.addReg(ArgReg.first, RegState::Implicit); 11296a70874dSMatt Arsenault } 11306a70874dSMatt Arsenault } 11316a70874dSMatt Arsenault 11326a70874dSMatt Arsenault bool AMDGPUCallLowering::lowerTailCall( 11336a70874dSMatt Arsenault MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info, 11346a70874dSMatt Arsenault SmallVectorImpl<ArgInfo> &OutArgs) const { 11356a70874dSMatt Arsenault MachineFunction &MF = MIRBuilder.getMF(); 11366a70874dSMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 11376a70874dSMatt Arsenault SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>(); 11386a70874dSMatt Arsenault const Function &F = MF.getFunction(); 11396a70874dSMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo(); 11406a70874dSMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>(); 11416a70874dSMatt Arsenault 11426a70874dSMatt Arsenault // True when we're tail calling, but without -tailcallopt. 11436a70874dSMatt Arsenault bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt; 11446a70874dSMatt Arsenault 11456a70874dSMatt Arsenault // Find out which ABI gets to decide where things go. 
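// (Both the fixed and vararg assignment functions come from
// SITargetLowering::CCAssignFnForCall; the vararg one is effectively unused
// here since variadic calls are rejected up front in lowerCall.)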
11466a70874dSMatt Arsenault CallingConv::ID CalleeCC = Info.CallConv; 11476a70874dSMatt Arsenault CCAssignFn *AssignFnFixed; 11486a70874dSMatt Arsenault CCAssignFn *AssignFnVarArg; 11496a70874dSMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI); 11506a70874dSMatt Arsenault 11516a70874dSMatt Arsenault MachineInstrBuilder CallSeqStart; 11526a70874dSMatt Arsenault if (!IsSibCall) 11536a70874dSMatt Arsenault CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP); 11546a70874dSMatt Arsenault 11556a70874dSMatt Arsenault unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true); 11566a70874dSMatt Arsenault auto MIB = MIRBuilder.buildInstrNoInsert(Opc); 11576a70874dSMatt Arsenault if (!addCallTargetOperands(MIB, MIRBuilder, Info)) 11586a70874dSMatt Arsenault return false; 11596a70874dSMatt Arsenault 11606a70874dSMatt Arsenault // Byte offset for the tail call. When we are sibcalling, this will always 11616a70874dSMatt Arsenault // be 0. 11626a70874dSMatt Arsenault MIB.addImm(0); 11636a70874dSMatt Arsenault 11646a70874dSMatt Arsenault // Tell the call which registers are clobbered. 11656a70874dSMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo(); 11666a70874dSMatt Arsenault const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC); 11676a70874dSMatt Arsenault MIB.addRegMask(Mask); 11686a70874dSMatt Arsenault 11696a70874dSMatt Arsenault // FPDiff is the byte offset of the call's argument area from the callee's. 11706a70874dSMatt Arsenault // Stores to callee stack arguments will be placed in FixedStackSlots offset 11716a70874dSMatt Arsenault // by this amount for a tail call. In a sibling call it must be 0 because the 11726a70874dSMatt Arsenault // caller will deallocate the entire stack and the callee still expects its 11736a70874dSMatt Arsenault // arguments to begin at SP+0. 11746a70874dSMatt Arsenault int FPDiff = 0; 11756a70874dSMatt Arsenault 11766a70874dSMatt Arsenault // This will be 0 for sibcalls, potentially nonzero for tail calls produced 11776a70874dSMatt Arsenault // by -tailcallopt. For sibcalls, the memory operands for the call are 11786a70874dSMatt Arsenault // already available in the caller's incoming argument space. 11796a70874dSMatt Arsenault unsigned NumBytes = 0; 11806a70874dSMatt Arsenault if (!IsSibCall) { 11816a70874dSMatt Arsenault // We aren't sibcalling, so we need to compute FPDiff. We need to do this 11826a70874dSMatt Arsenault // before handling assignments, because FPDiff must be known for memory 11836a70874dSMatt Arsenault // arguments. 11846a70874dSMatt Arsenault unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea(); 11856a70874dSMatt Arsenault SmallVector<CCValAssign, 16> OutLocs; 11866a70874dSMatt Arsenault CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext()); 11876a70874dSMatt Arsenault 11886a70874dSMatt Arsenault // FIXME: Not accounting for callee implicit inputs 11896a70874dSMatt Arsenault OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg); 11906a70874dSMatt Arsenault if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) 11916a70874dSMatt Arsenault return false; 11926a70874dSMatt Arsenault 11936a70874dSMatt Arsenault // The callee will pop the argument stack as a tail call. Thus, we must 11946a70874dSMatt Arsenault // keep it 16-byte aligned. 
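// Worked example (illustrative numbers only): 36 bytes of outgoing
// arguments round up to NumBytes == 48 under a 16-byte stack alignment; a
// caller with NumReusableBytes == 32 then gets FPDiff == 32 - 48 == -16,
// i.e. this tail call needs 16 bytes more than the incoming argument area
// provides.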
11956a70874dSMatt Arsenault NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment()); 11966a70874dSMatt Arsenault 11976a70874dSMatt Arsenault // FPDiff will be negative if this tail call requires more space than we 11986a70874dSMatt Arsenault // would automatically have in our incoming argument space. Positive if we 11996a70874dSMatt Arsenault // actually shrink the stack. 12006a70874dSMatt Arsenault FPDiff = NumReusableBytes - NumBytes; 12016a70874dSMatt Arsenault 12026a70874dSMatt Arsenault // The stack pointer must be 16-byte aligned at all times it's used for a 12036a70874dSMatt Arsenault // memory operation, which in practice means at *all* times and in 12046a70874dSMatt Arsenault // particular across call boundaries. Therefore our own arguments started at 12056a70874dSMatt Arsenault // a 16-byte aligned SP and the delta applied for the tail call should 12066a70874dSMatt Arsenault // satisfy the same constraint. 120785394d9eSMatt Arsenault assert(isAligned(ST.getStackAlignment(), FPDiff) && 120885394d9eSMatt Arsenault "unaligned stack on tail call"); 12096a70874dSMatt Arsenault } 12106a70874dSMatt Arsenault 12116a70874dSMatt Arsenault SmallVector<CCValAssign, 16> ArgLocs; 12126a70874dSMatt Arsenault CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); 12136a70874dSMatt Arsenault 12146a70874dSMatt Arsenault // We could pass MIB and directly add the implicit uses to the call 12156a70874dSMatt Arsenault // now. However, as an aesthetic choice, place implicit argument operands 12166a70874dSMatt Arsenault // after the ordinary user argument registers. 12176a70874dSMatt Arsenault SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; 12186a70874dSMatt Arsenault 12196a70874dSMatt Arsenault if (AMDGPUTargetMachine::EnableFixedFunctionABI && 12206a70874dSMatt Arsenault Info.CallConv != CallingConv::AMDGPU_Gfx) { 12216a70874dSMatt Arsenault // With a fixed ABI, allocate fixed registers before user arguments. 12226a70874dSMatt Arsenault if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) 12236a70874dSMatt Arsenault return false; 12246a70874dSMatt Arsenault } 12256a70874dSMatt Arsenault 12266a70874dSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg); 12276a70874dSMatt Arsenault 12286a70874dSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, CCInfo)) 12296a70874dSMatt Arsenault return false; 12306a70874dSMatt Arsenault 12316a70874dSMatt Arsenault // Do the actual argument marshalling. 12326a70874dSMatt Arsenault AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff); 12336a70874dSMatt Arsenault if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder)) 12346a70874dSMatt Arsenault return false; 12356a70874dSMatt Arsenault 12366a70874dSMatt Arsenault handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs); 12376a70874dSMatt Arsenault 12386a70874dSMatt Arsenault // If we have -tailcallopt, we need to adjust the stack. We'll do the call 12396a70874dSMatt Arsenault // sequence start and end here. 12406a70874dSMatt Arsenault if (!IsSibCall) { 12416a70874dSMatt Arsenault MIB->getOperand(1).setImm(FPDiff); 12426a70874dSMatt Arsenault CallSeqStart.addImm(NumBytes).addImm(0); 12436a70874dSMatt Arsenault // End the call sequence *before* emitting the call. Normally, we would 12446a70874dSMatt Arsenault // tidy the frame up after the call. 
However, here, we've laid out the
12456a70874dSMatt Arsenault // parameters so that when SP is reset, they will be in the correct
12466a70874dSMatt Arsenault // location.
12476a70874dSMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
12486a70874dSMatt Arsenault }
12496a70874dSMatt Arsenault
12506a70874dSMatt Arsenault // Now we can add the actual call instruction to the correct basic block.
12516a70874dSMatt Arsenault MIRBuilder.insertInstr(MIB);
12526a70874dSMatt Arsenault
12536a70874dSMatt Arsenault // If Callee is a reg, since it is used by a target-specific
12546a70874dSMatt Arsenault // instruction, it must have a register class matching the
12556a70874dSMatt Arsenault // constraint of that instruction.
12566a70874dSMatt Arsenault
12576a70874dSMatt Arsenault // FIXME: We should define regbankselectable call instructions to handle
12586a70874dSMatt Arsenault // divergent call targets.
12596a70874dSMatt Arsenault if (MIB->getOperand(0).isReg()) {
12606a70874dSMatt Arsenault MIB->getOperand(0).setReg(constrainOperandRegClass(
12616a70874dSMatt Arsenault MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
12626a70874dSMatt Arsenault MIB->getDesc(), MIB->getOperand(0), 0));
12636a70874dSMatt Arsenault }
12646a70874dSMatt Arsenault
12656a70874dSMatt Arsenault MF.getFrameInfo().setHasTailCall();
12666a70874dSMatt Arsenault Info.LoweredTailCall = true;
12676a70874dSMatt Arsenault return true;
12686a70874dSMatt Arsenault }
12696a70874dSMatt Arsenault
127061f1f2a2SMatt Arsenault bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
127161f1f2a2SMatt Arsenault CallLoweringInfo &Info) const {
127261f1f2a2SMatt Arsenault if (Info.IsVarArg) {
127361f1f2a2SMatt Arsenault LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
127461f1f2a2SMatt Arsenault return false;
127561f1f2a2SMatt Arsenault }
127661f1f2a2SMatt Arsenault
127761f1f2a2SMatt Arsenault MachineFunction &MF = MIRBuilder.getMF();
127861f1f2a2SMatt Arsenault const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
127961f1f2a2SMatt Arsenault const SIRegisterInfo *TRI = ST.getRegisterInfo();
128061f1f2a2SMatt Arsenault
128161f1f2a2SMatt Arsenault const Function &F = MF.getFunction();
128261f1f2a2SMatt Arsenault MachineRegisterInfo &MRI = MF.getRegInfo();
128361f1f2a2SMatt Arsenault const SITargetLowering &TLI = *getTLI<SITargetLowering>();
128461f1f2a2SMatt Arsenault const DataLayout &DL = F.getParent()->getDataLayout();
128561f1f2a2SMatt Arsenault
1286a022b1ccSSebastian Neubauer if (!AMDGPUTargetMachine::EnableFixedFunctionABI &&
12870bb60dbeSSebastian Neubauer Info.CallConv != CallingConv::AMDGPU_Gfx) {
1288a022b1ccSSebastian Neubauer LLVM_DEBUG(dbgs() << "Variable function ABI not implemented\n");
1289a022b1ccSSebastian Neubauer return false;
1290a022b1ccSSebastian Neubauer }
1291a022b1ccSSebastian Neubauer
129261f1f2a2SMatt Arsenault SmallVector<ArgInfo, 8> OutArgs;
1293fd82cbcfSMatt Arsenault for (auto &OrigArg : Info.OrigArgs)
1294fd82cbcfSMatt Arsenault splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
129561f1f2a2SMatt Arsenault
12963231d2b5SMatt Arsenault SmallVector<ArgInfo, 8> InArgs;
12973231d2b5SMatt Arsenault if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
12983231d2b5SMatt Arsenault splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);
12993231d2b5SMatt Arsenault
130061f1f2a2SMatt Arsenault // If we can lower as a tail call, do that instead.
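// (For musttail this is mandatory rather than an optimization: if the
// eligibility check fails we give up on the call entirely below instead of
// falling back to a regular call.)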
13016a70874dSMatt Arsenault bool CanTailCallOpt = 13026a70874dSMatt Arsenault isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs); 130361f1f2a2SMatt Arsenault 130461f1f2a2SMatt Arsenault // We must emit a tail call if we have musttail. 130561f1f2a2SMatt Arsenault if (Info.IsMustTailCall && !CanTailCallOpt) { 130661f1f2a2SMatt Arsenault LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n"); 130761f1f2a2SMatt Arsenault return false; 130861f1f2a2SMatt Arsenault } 130961f1f2a2SMatt Arsenault 13106a70874dSMatt Arsenault if (CanTailCallOpt) 13116a70874dSMatt Arsenault return lowerTailCall(MIRBuilder, Info, OutArgs); 13126a70874dSMatt Arsenault 131361f1f2a2SMatt Arsenault // Find out which ABI gets to decide where things go. 131461f1f2a2SMatt Arsenault CCAssignFn *AssignFnFixed; 131561f1f2a2SMatt Arsenault CCAssignFn *AssignFnVarArg; 131661f1f2a2SMatt Arsenault std::tie(AssignFnFixed, AssignFnVarArg) = 131761f1f2a2SMatt Arsenault getAssignFnsForCC(Info.CallConv, TLI); 131861f1f2a2SMatt Arsenault 131961f1f2a2SMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP) 132061f1f2a2SMatt Arsenault .addImm(0) 132161f1f2a2SMatt Arsenault .addImm(0); 132261f1f2a2SMatt Arsenault 132361f1f2a2SMatt Arsenault // Create a temporarily-floating call instruction so we can add the implicit 132461f1f2a2SMatt Arsenault // uses of arg registers. 132561f1f2a2SMatt Arsenault unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false); 132661f1f2a2SMatt Arsenault 132761f1f2a2SMatt Arsenault auto MIB = MIRBuilder.buildInstrNoInsert(Opc); 132861f1f2a2SMatt Arsenault MIB.addDef(TRI->getReturnAddressReg(MF)); 132961f1f2a2SMatt Arsenault 133061f1f2a2SMatt Arsenault if (!addCallTargetOperands(MIB, MIRBuilder, Info)) 133161f1f2a2SMatt Arsenault return false; 133261f1f2a2SMatt Arsenault 133361f1f2a2SMatt Arsenault // Tell the call which registers are clobbered. 133461f1f2a2SMatt Arsenault const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv); 133561f1f2a2SMatt Arsenault MIB.addRegMask(Mask); 133661f1f2a2SMatt Arsenault 133761f1f2a2SMatt Arsenault SmallVector<CCValAssign, 16> ArgLocs; 133861f1f2a2SMatt Arsenault CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext()); 133961f1f2a2SMatt Arsenault 134061f1f2a2SMatt Arsenault // We could pass MIB and directly add the implicit uses to the call 134161f1f2a2SMatt Arsenault // now. However, as an aesthetic choice, place implicit argument operands 134261f1f2a2SMatt Arsenault // after the ordinary user argument registers. 134361f1f2a2SMatt Arsenault SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs; 134461f1f2a2SMatt Arsenault 13459719f170SMatt Arsenault if (AMDGPUTargetMachine::EnableFixedFunctionABI && 13469719f170SMatt Arsenault Info.CallConv != CallingConv::AMDGPU_Gfx) { 134761f1f2a2SMatt Arsenault // With a fixed ABI, allocate fixed registers before user arguments. 134861f1f2a2SMatt Arsenault if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info)) 134961f1f2a2SMatt Arsenault return false; 135061f1f2a2SMatt Arsenault } 135161f1f2a2SMatt Arsenault 135261f1f2a2SMatt Arsenault // Do the actual argument marshalling. 
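// This happens in two phases: determineAssignments computes a CCValAssign
// for each split value, and handleAssignments then emits the copies (and
// any stack stores) through AMDGPUOutgoingArgHandler.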
135361f1f2a2SMatt Arsenault SmallVector<Register, 8> PhysRegs;
135424e2e5dfSMatt Arsenault
135524e2e5dfSMatt Arsenault OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
135624e2e5dfSMatt Arsenault if (!determineAssignments(Assigner, OutArgs, CCInfo))
135724e2e5dfSMatt Arsenault return false;
135824e2e5dfSMatt Arsenault
135924e2e5dfSMatt Arsenault AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
136024e2e5dfSMatt Arsenault if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
136161f1f2a2SMatt Arsenault return false;
136261f1f2a2SMatt Arsenault
136361f1f2a2SMatt Arsenault const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
136461f1f2a2SMatt Arsenault
13656a70874dSMatt Arsenault handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);
136661f1f2a2SMatt Arsenault
136761f1f2a2SMatt Arsenault // Get a count of how many bytes are to be pushed on the stack.
136861f1f2a2SMatt Arsenault unsigned NumBytes = CCInfo.getNextStackOffset();
136961f1f2a2SMatt Arsenault
137061f1f2a2SMatt Arsenault // If Callee is a reg, since it is used by a target-specific
137161f1f2a2SMatt Arsenault // instruction, it must have a register class matching the
137261f1f2a2SMatt Arsenault // constraint of that instruction.
137361f1f2a2SMatt Arsenault
137461f1f2a2SMatt Arsenault // FIXME: We should define regbankselectable call instructions to handle
137561f1f2a2SMatt Arsenault // divergent call targets.
137661f1f2a2SMatt Arsenault if (MIB->getOperand(1).isReg()) {
137761f1f2a2SMatt Arsenault MIB->getOperand(1).setReg(constrainOperandRegClass(
137861f1f2a2SMatt Arsenault MF, *TRI, MRI, *ST.getInstrInfo(),
137961f1f2a2SMatt Arsenault *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
138061f1f2a2SMatt Arsenault 1));
138161f1f2a2SMatt Arsenault }
138261f1f2a2SMatt Arsenault
1383d2b8fcffSMatt Arsenault // Now we can add the actual call instruction to the correct position.
1384d2b8fcffSMatt Arsenault MIRBuilder.insertInstr(MIB);
1385d2b8fcffSMatt Arsenault
138661f1f2a2SMatt Arsenault // Finally we can copy the returned value back into its virtual register. In
138761f1f2a2SMatt Arsenault // symmetry with the arguments, the physical register must be an
138861f1f2a2SMatt Arsenault // implicit-define of the call instruction.
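// E.g. for an i32 returned in VGPR0 (register chosen for illustration), the
// call gets an implicit-def of $vgpr0 and the return handler then emits a
// %ret(s32) = COPY $vgpr0 back into the result's virtual register.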
1389ae25a397SChristudasan Devadasan if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) { 139061f1f2a2SMatt Arsenault CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, 139161f1f2a2SMatt Arsenault Info.IsVarArg); 13925efc3bfdSMatt Arsenault IncomingValueAssigner Assigner(RetAssignFn); 139324e2e5dfSMatt Arsenault CallReturnHandler Handler(MIRBuilder, MRI, MIB); 139424e2e5dfSMatt Arsenault if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder, 139524e2e5dfSMatt Arsenault Info.CallConv, Info.IsVarArg)) 139661f1f2a2SMatt Arsenault return false; 139761f1f2a2SMatt Arsenault } 139861f1f2a2SMatt Arsenault 139961f1f2a2SMatt Arsenault uint64_t CalleePopBytes = NumBytes; 14003231d2b5SMatt Arsenault 14013231d2b5SMatt Arsenault MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN) 14023231d2b5SMatt Arsenault .addImm(0) 140361f1f2a2SMatt Arsenault .addImm(CalleePopBytes); 140461f1f2a2SMatt Arsenault 14053231d2b5SMatt Arsenault if (!Info.CanLowerReturn) { 14063231d2b5SMatt Arsenault insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs, 14073231d2b5SMatt Arsenault Info.DemoteRegister, Info.DemoteStackIndex); 14083231d2b5SMatt Arsenault } 14093231d2b5SMatt Arsenault 141061f1f2a2SMatt Arsenault return true; 141161f1f2a2SMatt Arsenault } 1412