//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB)
      : OutgoingValueHandler(B, MRI), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the value
    // ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : IncomingValueHandler(B, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
      : AMDGPUIncomingArgHandler(B, MRI) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB), FPDiff(FPDiff),
        IsTailCall(IsTailCall) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(PtrTy, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg) {
      const GCNSubtarget &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();
      if (ST.enableFlatScratch()) {
        // The stack is accessed unswizzled, so we can use a regular copy.
        SPReg = MIRBuilder.buildCopy(PtrTy,
                                     MFI->getStackPtrOffsetReg()).getReg(0);
      } else {
        // The address we produce here, without knowing the use context, is
        // going to be interpreted as a vector address, so we need to convert
        // to a swizzled address.
        SPReg = MIRBuilder.buildInstr(AMDGPU::G_AMDGPU_WAVE_ADDRESS, {PtrTy},
                                      {MFI->getStackPtrOffsetReg()}).getReg(0);
      }
    }

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, MemTy,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr, LLT MemTy,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];
    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }
};
}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx), 0);
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueAssigner Assigner(AssignFn);
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret);
  return determineAndHandleAssignments(RetHandler, Assigner, SplitRetInfos, B,
                                       CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc = 0;
  if (IsShader)
    ReturnOpc = AMDGPU::SI_RETURN_TO_EPILOG;
  else if (CC == CallingConv::AMDGPU_Gfx)
    ReturnOpc = AMDGPU::S_SETPC_B64_return_gfx;
  else
    ReturnOpc = AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  } else if (ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) {
    ReturnAddrVReg =
        MRI.createVirtualRegister(&AMDGPU::Gfx_CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return ||
      ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, ArgInfo &OrigArg,
                                        uint64_t Offset,
                                        Align Alignment) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);

  SmallVector<ArgInfo, 32> SplitArgs;
  SmallVector<uint64_t> FieldOffsets;
  splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv(), &FieldOffsets);

  unsigned Idx = 0;
  for (ArgInfo &SplitArg : SplitArgs) {
    Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
    lowerParameterPtr(PtrReg, B, Offset + FieldOffsets[Idx]);

    LLT ArgTy = getLLTForType(*SplitArg.Ty, DL);
    if (SplitArg.Flags[0].isPointer()) {
      // Compensate for losing pointeriness in splitValueTypes.
      LLT PtrTy = LLT::pointer(SplitArg.Flags[0].getPointerAddrSpace(),
                               ArgTy.getScalarSizeInBits());
      ArgTy = ArgTy.isVector() ? LLT::vector(ArgTy.getElementCount(), PtrTy)
                               : PtrTy;
    }

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        PtrInfo,
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        ArgTy, commonAlignment(Alignment, FieldOffsets[Idx]));

    assert(SplitArg.Regs.size() == 1);

    B.buildLoad(SplitArg.Regs[0], PtrReg, *MMO);
    ++Idx;
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None;
    if (!ABIAlign)
      ABIAlign = DL.getABITypeAlign(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArgInfo OrigArg(VRegs[i], Arg, i);
      const unsigned OrigArgIdx = i + AttributeList::FirstArgIndex;
      setArgFlags(OrigArg, OrigArgIdx, DL, F);
      lowerParameter(B, OrigArg, ArgOffset, Alignment);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  Info->allocateModuleLDSGlobal(F.getParent());

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (Register R : VRegs[Idx])
          B.buildUndef(R);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg, Idx);
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc && !IsGraphics) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  IncomingValueAssigner Assigner(AssignFn);
  if (!determineAssignments(Assigner, SplitArgs, CCInfo))
    return false;

  FormalArgHandler Handler(B, MRI);
  if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
    return false;

  uint64_t StackOffset = Assigner.StackOffset;

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // When we tail call, we need to check if the callee's arguments will fit on
  // the caller's stack. So, whenever we lower formal arguments, we should keep
  // track of this information, since we might lower a tail call in this
  // function later.
  Info->setBytesInStackArgArea(StackOffset);

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
                                           CCState &CCInfo,
                                           SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
                                           CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  // If there's no call site, this doesn't correspond to a call from the IR and
  // doesn't need implicit inputs.
  if (!Info.CB)
    return true;

  const AMDGPUFunctionArgInfo *CalleeArgInfo
    = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
  };

  static constexpr StringLiteral ImplicitAttrNames[] = {
    "amdgpu-no-dispatch-ptr",
    "amdgpu-no-queue-ptr",
    "amdgpu-no-implicitarg-ptr",
    "amdgpu-no-dispatch-id",
    "amdgpu-no-workgroup-id-x",
    "amdgpu-no-workgroup-id-y",
    "amdgpu-no-workgroup-id-z"
  };

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI
    = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());

  unsigned I = 0;
  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    // If the callee does not use the attribute value, skip copying the value.
    if (Info.CB->hasFnAttr(ImplicitAttrNames[I++]))
      continue;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    } else {
      // We may have proven the input wasn't needed, although the ABI is
      // requiring it. We just need to allocate the register appropriately.
      MIRBuilder.buildUndef(InputReg);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register or pass it as is if already
  // packed.
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  const bool NeedWorkItemIDX = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-x");
  const bool NeedWorkItemIDY = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-y");
  const bool NeedWorkItemIDZ = !Info.CB->hasFnAttr("amdgpu-no-workitem-id-z");

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX &&
      NeedWorkItemIDX) {
    if (ST.getMaxWorkitemID(MF.getFunction(), 0) != 0) {
      InputReg = MRI.createGenericVirtualRegister(S32);
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                         std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
    } else {
      InputReg = MIRBuilder.buildConstant(S32, 0).getReg(0);
    }
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY &&
      NeedWorkItemIDY && ST.getMaxWorkitemID(MF.getFunction(), 1) != 0) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ &&
      NeedWorkItemIDZ && ST.getMaxWorkitemID(MF.getFunction(), 2) != 0) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg &&
      (NeedWorkItemIDX || NeedWorkItemIDY || NeedWorkItemIDZ)) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    if (!IncomingArgX && !IncomingArgY && !IncomingArgZ) {
      // We're in a situation where the outgoing function requires the workitem
      // ID, but the calling function does not have it (e.g. a graphics function
      // calling a C calling convention function). This is illegal, but we need
      // to produce something.
      MIRBuilder.buildUndef(InputReg);
    } else {
      // Workitem ids are already packed, any of the present incoming arguments
      // will carry all required fields.
      ArgDescriptor IncomingArg = ArgDescriptor::createArg(
        IncomingArgX ? *IncomingArgX :
        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
      LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                         &AMDGPU::VGPR_32RegClass, S32);
    }
  }

  if (OutgoingArg->isRegister()) {
    if (InputReg)
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);

    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  assert(!(IsIndirect && IsTailCall) && "Indirect calls can't be tail calls, "
         "because the address can be divergent");
  return IsTailCall ? AMDGPU::SI_TCRETURN : AMDGPU::G_SI_CALL;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering lightly assumed we can directly encode a call target in
    // the instruction, which is not the case. Materialize the address here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}

bool AMDGPUCallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = ST.getRegisterInfo();

  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
    return false;

  // Check if the caller and callee will handle arguments in the same way.
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  // FIXME: We are not accounting for potential differences in implicitly passed
  // inputs, but only the fixed ABI is supported now anyway.
  IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                       CalleeAssignFnVarArg);
  IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                       CallerAssignFnVarArg);
  return resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner);
}

bool AMDGPUCallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

bool AMDGPUCallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &B, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs, SmallVectorImpl<ArgInfo> &OutArgs) const {
  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  // Indirect calls can't be tail calls, because the address can be divergent.
  // TODO Check divergence info if the call really is divergent.
  if (Info.Callee.isReg())
    return false;

  MachineFunction &MF = B.getMF();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  // Kernels aren't callable, and don't have a live in return address so it
  // doesn't make sense to do a tail call with entry functions.
  if (!CallerPreserved)
    return false;

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval "
                         "or swifterror arguments\n");
    return false;
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

// Insert outgoing implicit arguments for a call, by inserting copies to the
// implicit argument registers and adding the necessary implicit uses to the
// call instruction.
void AMDGPUCallLowering::handleImplicitCallArguments(
    MachineIRBuilder &MIRBuilder, MachineInstrBuilder &CallInst,
    const GCNSubtarget &ST, const SIMachineFunctionInfo &FuncInfo,
    ArrayRef<std::pair<MCRegister, Register>> ImplicitArgRegs) const {
  if (!ST.enableFlatScratch()) {
    // Insert copies for the SRD. In the HSA case, this should be an identity
    // copy.
    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::fixed_vector(4, 32),
                                               FuncInfo.getScratchRSrcReg());
    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
    CallInst.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
  }

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
    CallInst.addReg(ArgReg.first, RegState::Implicit);
  }
}

bool AMDGPUCallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  MIB.addRegMask(Mask);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    // FIXME: Not accounting for callee implicit inputs
    OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(isAligned(ST.getStackAlignment(), FPDiff) &&
           "unaligned stack on tail call");
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);

  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  // Do the actual argument marshalling.
  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, true, FPDiff);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *FuncInfo, ImplicitArgRegs);

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(0).isReg()) {
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(), *ST.getRegBankInfo(), *MIB,
        MIB->getDesc(), MIB->getOperand(0), 0));
  }

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
  if (Info.IsVarArg) {
    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
    return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs)
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

  SmallVector<ArgInfo, 8> InArgs;
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
    .addImm(0)
    .addImm(0);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Tell the call which registers are clobbered.
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  // Do the actual argument marshalling.
  SmallVector<Register, 8> PhysRegs;

  OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg);
  if (!determineAssignments(Assigner, OutArgs, CCInfo))
    return false;

  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, false);
  if (!handleAssignments(Handler, OutArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(1).isReg()) {
    MIB->getOperand(1).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(),
        *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
        1));
  }

  // Now we can add the actual call instruction to the correct position.
  MIRBuilder.insertInstr(MIB);

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
                                                      Info.IsVarArg);
    IncomingValueAssigner Assigner(RetAssignFn);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(Handler, Assigner, InArgs, MIRBuilder,
                                       Info.CallConv, Info.IsVarArg))
      return false;
  }

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
    .addImm(0)
    .addImm(CalleePopBytes);

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }

  return true;
}