//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit
/// register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}
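
// For example, an i16 value headed for a 32-bit location is widened before
// the physreg copy (illustrative MIR, register names assumed):
//   %ext:_(s32) = G_ANYEXT %val:_(s16)
//   $vgpr0 = COPY %ext:_(s32)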

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the
    // value ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI =
        static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // do a 32-bit copy, and truncate to avoid the verifier complaining
      // about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};
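
// The sub-32-bit incoming path above produces a copy/hint/trunc sequence
// rather than a direct sub-register copy (illustrative MIR, names assumed;
// the assert hint is only emitted for signext/zeroext arguments):
//   %copy:_(s32) = COPY $vgpr0
//   %hint:_(s32) = G_ASSERT_ZEXT %copy, 16
//   %arg:_(s16) = G_TRUNC %hint:_(s32)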

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg) {
      const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
      if (ST.enableFlatScratch()) {
        // The stack is accessed unswizzled, so we can use a regular copy.
        SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg())
                    .getReg(0);
      } else {
        // The address we produce here, without knowing the use context, is
        // going to be interpreted as a vector address, so we need to convert
        // to a swizzled address.
        SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
                                      {MFI->getStackPtrOffsetReg()})
                    .getReg(0);
      }
    }

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }
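
  // Note: with flat scratch enabled, the SP already works as a plain byte
  // address, hence the COPY above; otherwise buffer (swizzled) addressing is
  // in effect and G_AMDGPU_WAVE_ADDRESS rebases the SP into the per-lane
  // address form that the G_PTR_ADD and the stores below expect.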

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B, const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT =
          TLI.getTypeForExtReturn(Ctx, VT, extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                       CC, F.isVarArg());
}
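
// For instance, a function returning `signext i8` is widened with G_SEXT to
// the extended return type chosen by getTypeForExtReturn (i32 here, assuming
// the usual scalar promotion) before the value is assigned to its return
// register.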

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = B.getMF();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM).addImm(0);
    return true;
  }

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
  auto Ret = B.buildInstrNoInsert(ReturnOpc);

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}
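
// Each split piece becomes an invariant load off the kernarg segment pointer
// (illustrative MIR for an s32 piece at byte offset 8, names assumed):
//   %off:_(s64) = G_CONSTANT i64 8
//   %ptr:_(p4) = G_PTR_ADD %kernarg_segment, %off
//   %val:_(s32) = G_LOAD %ptr :: (dereferenceable invariant load (s32),
//                                 addrspace 4)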

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr() && AMDGPU::getAmdhsaCodeObjectVersion() < 5) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None;
    if (!ABIAlign)
      ABIAlign = DL.getABITypeAlign(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}
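
// Offset bookkeeping example (assuming BaseOffset is 0): for kernel arguments
// (i32, i64, i32), the first i32 lands at offset 0 and ExplicitArgOffset
// becomes 4; the i64 is aligned up to 8 and occupies [8, 16); the final i32
// lands at offset 16. Each slot is placed by natural ABI alignment, not
// packed.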

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or
  // argument splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: This probably isn't defined for mesa
  if (Info->hasFlatScratchInit() && !Subtarget.isAmdPalOS()) {
    Register FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions are not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (Register R : VRegs[Idx])
          B.buildUndef(R);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here
      // are the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case
      // where a bit is set in PSInputAddr but not PSInputEnable is where the
      // frontend set up an input arg for a particular interpolation mode,
      // but nothing uses that input arg. Really we should have an earlier
      // pass that removes such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 && (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }
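
  // Worked example for the PAL path: if PSInputAddr = 0x1 (one PERSP input
  // allocated) but PSInputEnable = 0x0 (nothing used), PsInputBits is 0, the
  // (PsInputBits & 0x7F) == 0 clause fires, and countTrailingZeros(0x1) == 0
  // re-enables input 0 so at least one interpolation mode reaches the
  // hardware.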

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackOffset = Assigner.StackOffset;

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit
  // on the caller's stack. So, whenever we lower formal arguments, we should
  // keep track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackOffset);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(
    MachineIRBuilder &MIRBuilder, CCState &CCInfo,
    SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR
  // and doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo =
      &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated
  // by the fact that at least in kernels, the input argument is not
  // necessarily in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
      AMDGPUFunctionArgInfo::DISPATCH_PTR,
      AMDGPUFunctionArgInfo::QUEUE_PTR,
      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
      AMDGPUFunctionArgInfo::DISPATCH_ID,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z};

  static constexpr StringLiteral ImplicitAttrNames[] = {
      "amdgpu-no-dispatch-ptr",
      "amdgpu-no-queue-ptr",
      "amdgpu-no-implicitarg-ptr",
      "amdgpu-no-dispatch-id",
      "amdgpu-no-workgroup-id-x",
      "amdgpu-no-workgroup-id-y",
      "amdgpu-no-workgroup-id-z"};
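
  // Note: ImplicitAttrNames[I] must stay in one-to-one correspondence with
  // InputRegs[I]; the loop below walks both arrays with the same counter.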

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI =
      static_cast<const AMDGPULegalizerInfo *>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    } else {
      // We may have proven the input wasn't needed, although the ABI requires
      // it. We just need to allocate the register appropriately.
      MIRBuilder.buildUndef(InputReg);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register, or pass them as-is if they are
  // already packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                         std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
    } else {
      InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
    }
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10))
            .getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20))
            .getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg &&
      (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
      // We're in a situation where the outgoing function requires the
      // workitem ID, but the calling function does not have it (e.g. a
      // graphics function calling a C calling convention function). This is
      // illegal, but we need to produce something.
      MIRBuilder.buildUndef(InputReg);
    } else {
      // Workitem ids are already packed, so any of the present incoming
      // arguments will carry all required fields.
      ArgDescriptor IncomingArg = ArgDescriptor::createArg(
          IncomingArgX ? *IncomingArgX
                       : IncomingArgY ? *IncomingArgY : *IncomingArgZ,
          ~0u);
      LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                         &AMDGPU::VGPR_32RegClass, S32);
    }
  }

  if (OutgoingArg->isRegister()) {
    if (InputReg)
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}
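
// Packed workitem ID layout produced above (from the shl-by-10/20 and or):
//   bits [9:0]   = workitem ID X
//   bits [19:10] = workitem ID Y
//   bits [29:20] = workitem ID Z
// e.g. (X, Y, Z) = (5, 3, 1) packs to 5 | (3 << 10) | (1 << 20) = 0x100C05.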

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, "
                                        "because the address can be divergent");
  return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::G_SI_CALL;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering naively assumed we can directly encode a call target
    // in the instruction, which is not the case. Materialize the address
    // here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}
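
// For a direct call this yields something like (illustrative MIR; the symbol
// operand is kept alongside the materialized address):
//   %addr:_(p0) = G_GLOBAL_VALUE @callee
//   ... = G_SI_CALL %addr(p0), @callee, ...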

bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  // Check if the caller and callee will handle arguments in the same way.
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  // FIXME: We are not accounting for potential differences in implicitly
  // passed inputs, but only the fixed ABI is supported now anyway.
  IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                       CalleeAssignFnVarArg);
  IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                       CallerAssignFnVarArg);
  return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
}

bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

/// Return true if the calling convention is one that we can guarantee TCO
/// for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling
/// convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &B, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  // Indirect calls can't be tail calls, because the address can be divergent.
  // TODO: Check divergence info if the call really is divergent.
  if (Info.Callee.isReg())
    return false;

  MachineFunction &MF = B.getMF();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  const SIRegisterInfo *TRI =
      MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  // Kernels aren't callable, and don't have a live in return address so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
                         "or swifterror arguments\n");
    return false;
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

// Insert outgoing implicit arguments for a call, by inserting copies to the
// implicit argument registers and adding the necessary implicit uses to the
// call instruction.
void AMDGPUCallLowering::handleImplicitCallArguments(
    MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
    const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
  if (!ST.enableFlatScratch()) {
    // Insert copies for the SRD. In the HSA case, this should be an identity
    // copy.
    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
                                               FuncInfo.getScratchRSrcReg());
    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
    CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
  }

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
    CallInst.addReg(ArgReg.first, RegState::Implicit);
  }
}

bool AMDGPUCallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  MIB.addRegMask(Mask);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because
  // the caller will deallocate the entire stack and the callee still expects
  // its arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    // FIXME: Not accounting for callee implicit inputs
    OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if
    // we actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(isAligned(ST.getStackAlignment(), FPDiff) &&
           "unaligned stack on tail call");
  }
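
  // Worked example: if the caller's incoming argument area is 32 bytes
  // (NumReusableBytes) and the callee needs 48 bytes of outgoing arguments
  // (NumBytes, after 16-byte alignment), FPDiff = 32 - 48 = -16, so the
  // callee's fixed stack slots are created 16 bytes below the caller's
  // incoming argument base.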

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  // Do the actual argument marshalling.
  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(0).isReg()) {
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(0), 0));
  }

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}
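
// For the non-tail path below, the emitted sequence brackets the call with
// stack adjustment pseudos (illustrative; the return-address def is
// target-defined, typically $sgpr30_sgpr31):
//   ADJCALLSTACKUP 0, 0
//   ...argument copies...
//   $sgpr30_sgpr31 = G_SI_CALL %addr(p0), @callee, <regmask>, implicit ...
//   ADJCALLSTACKDOWN 0, <callee-pop bytes>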

bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
  if (Info.IsVarArg) {
    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
    return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs)
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

  SmallVector<ArgInfo, 8> InArgs;
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
      .addImm(0)
      .addImm(0);

  // Create a temporarily-floating call instruction so we can add the
  // implicit uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  // Do the actual argument marshalling.
  SmallVector<Register, 8> PhysRegs;

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(1).isReg()) {
    MIB->getOperand(1).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(1), 1));
  }

  // Now we can add the actual call instruction to the correct position.
  MIRBuilder.insertInstr(MIB);

  // Finally we can copy the returned value back into its virtual-register.
  // In symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn =
        TLI.CCAssignFnForReturn(Info.CallConv, Info.IsVarArg);
    IncomingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
                                       Info.CallConv, Info.IsVarArg))
      return false;
  }

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
      .addImm(0)
      .addImm(CalleePopBytes);

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }

  return true;
}