//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-call-lowering"

using namespace llvm;

namespace {

/// Wrapper around extendRegister to ensure we extend to a full 32-bit register.
static Register extendRegisterMin32(CallLowering::ValueHandler &Handler,
                                    Register ValVReg, CCValAssign &VA) {
  if (VA.getLocVT().getSizeInBits() < 32) {
    // 16-bit types are reported as legal for 32-bit registers. We need to
    // extend and do a 32-bit copy to avoid the verifier complaining about it.
    return Handler.MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
  }

  return Handler.extendRegister(ValVReg, VA);
}

struct AMDGPUOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  AMDGPUOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                             MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : OutgoingValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the
    // value ends up in a VGPR.
    // FIXME: Assert this is a shader return.
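    // Only uniform (SGPR) return locations need the readfirstlane inserted
    // below; VGPR return locations are copied directly.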
    const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
                        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

struct AMDGPUIncomingArgHandler : public CallLowering::IncomingValueHandler {
  uint64_t StackUsed = 0;

  AMDGPUIncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                           CCAssignFn *AssignFn)
      : IncomingValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();
    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);

      // If we have signext/zeroext, it applies to the whole 32-bit register
      // before truncation.
      auto Extended =
          buildExtensionHint(VA, Copy.getReg(0), LLT(VA.getLocVT()));
      MIRBuilder.buildTrunc(ValVReg, Extended);
      return;
    }

    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t MemSize,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    // The reported memory location may be wider than the value.
    const LLT RegTy = MRI.getType(ValVReg);
    MemSize = std::min(static_cast<uint64_t>(RegTy.getSizeInBytes()), MemSize);

    // FIXME: Get alignment
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
        MemSize, inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public AMDGPUIncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : AMDGPUIncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public AMDGPUIncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : AMDGPUIncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct AMDGPUOutgoingArgHandler : public AMDGPUOutgoingValueHandler {
  CCAssignFn *AssignFnVarArg;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  bool IsTailCall;

  AMDGPUOutgoingArgHandler(MachineIRBuilder &MIRBuilder,
                           MachineRegisterInfo &MRI, MachineInstrBuilder MIB,
                           CCAssignFn *AssignFn, CCAssignFn *AssignFnVarArg,
                           bool IsTailCall = false, int FPDiff = 0)
      : AMDGPUOutgoingValueHandler(MIRBuilder, MRI, MIB, AssignFn),
        AssignFnVarArg(AssignFnVarArg), FPDiff(FPDiff), IsTailCall(IsTailCall) {
  }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    const LLT PtrTy = LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32);
    const LLT S32 = LLT::scalar(32);

    if (IsTailCall) {
      llvm_unreachable("implement me");
    }

    const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(PtrTy, MFI->getStackPtrOffsetReg()).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(S32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(PtrTy, SPReg, OffsetReg);
    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegisterMin32(*this, ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    uint64_t LocMemOffset = VA.getLocMemOffset();
    const auto &ST = MF.getSubtarget<GCNSubtarget>();

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, Size,
        commonAlignment(ST.getStackAlignment(), LocMemOffset));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg,
                            unsigned ValRegIndex, Register Addr,
                            uint64_t MemSize, MachinePointerInfo &MPO,
                            CCValAssign &VA) override {
    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[ValRegIndex], VA)
                           : Arg.Regs[ValRegIndex];

    // If we extended the value type we might need to adjust the MMO's
    // Size. This happens if ComputeValueVTs widened a small type value to a
    // legal register type (e.g. s8 -> s16).
    const LLT RegTy = MRI.getType(ValVReg);
    MemSize = std::min(MemSize, (uint64_t)RegTy.getSizeInBytes());
    assignValueToAddress(ValVReg, Addr, MemSize, MPO, VA);
  }
};
}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

bool AMDGPUCallLowering::canLowerReturn(MachineFunction &MF,
                                        CallingConv::ID CallConv,
                                        SmallVectorImpl<BaseArgInfo> &Outs,
                                        bool IsVarArg) const {
  // For shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv, IsVarArg));
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();
  LLVMContext &Ctx = F.getContext();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  SmallVector<EVT, 8> SplitEVTs;
  ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
  assert(VRegs.size() == SplitEVTs.size() &&
         "For each split Type there should be exactly one VReg.");

  SmallVector<ArgInfo, 8> SplitRetInfos;

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    EVT VT = SplitEVTs[i];
    Register Reg = VRegs[i];
    ArgInfo RetInfo(Reg, VT.getTypeForEVT(Ctx));
    setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);

    if (VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (RetInfo.Flags[0].isSExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (RetInfo.Flags[0].isZExt()) {
        assert(RetInfo.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        RetInfo.Ty = ExtVT.getTypeForEVT(Ctx);
        LLT ExtTy = getLLTForType(*RetInfo.Ty, DL);
        Reg = B.buildInstr(ExtendOp, {ExtTy}, {Reg}).getReg(0);
      }
    }

    if (Reg != RetInfo.Regs[0]) {
      RetInfo.Regs[0] = Reg;
      // Reset the arg flags after modifying Reg.
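      // The flags were computed above for the original return type; recompute
      // them now that RetInfo.Ty refers to the extended type.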
      setArgFlags(RetInfo, AttributeList::ReturnIndex, DL, F);
    }

    splitToValueTypes(RetInfo, SplitRetInfos, DL, CC);
  }

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
  AMDGPUOutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler, CC, F.isVarArg());
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                     ArrayRef<Register> VRegs,
                                     FunctionLoweringInfo &FLI) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd =
      (IsShader && MFI->returnsVoid()) || AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!FLI.CanLowerReturn)
    insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
  else if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

void AMDGPUCallLowering::lowerParameterPtr(Register DstReg, MachineIRBuilder &B,
                                           Type *ParamTy,
                                           uint64_t Offset) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  B.buildPtrAdd(DstReg, KernArgSegmentVReg, OffsetReg);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, Type *ParamTy,
                                        uint64_t Offset, Align Alignment,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);

  LLT PtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
  Register PtrReg = B.getMRI()->createGenericVirtualRegister(PtrTy);
  lowerParameterPtr(PtrReg, B, ParamTy, Offset);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      TypeSize, Alignment);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
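// These cover the HSA ABI user SGPRs (private segment buffer, dispatch
// pointer, queue pointer, kernarg segment pointer, dispatch ID and flat
// scratch init), which must be reserved before other argument allocation.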
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    MaybeAlign ABIAlign = IsByRef ? Arg.getParamAlign() : None;
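    // Fall back to the ABI type alignment if no explicit alignment was given.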
    if (!ABIAlign)
      ABIAlign = DL.getABITypeAlign(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);

    if (IsByRef) {
      unsigned ByRefAS = cast<PointerType>(Arg.getType())->getAddressSpace();

      assert(VRegs[i].size() == 1 &&
             "expected only one register for byval pointers");
      if (ByRefAS == AMDGPUAS::CONSTANT_ADDRESS) {
        lowerParameterPtr(VRegs[i][0], B, ArgTy, ArgOffset);
      } else {
        const LLT ConstPtrTy = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
        Register PtrReg = MRI.createGenericVirtualRegister(ConstPtrTy);
        lowerParameterPtr(PtrReg, B, ArgTy, ArgOffset);

        B.buildAddrSpaceCast(VRegs[i][0], PtrReg);
      }
    } else {
      ArrayRef<Register> OrigArgRegs = VRegs[i];
      Register ArgReg =
          OrigArgRegs.size() == 1
              ? OrigArgRegs[0]
              : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));

      lowerParameter(B, ArgTy, ArgOffset, Alignment, ArgReg);
      if (OrigArgRegs.size() > 1)
        unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    }

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F, ArrayRef<ArrayRef<Register>> VRegs,
    FunctionLoweringInfo &FLI) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsGraphics = AMDGPU::isGraphics(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
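  // The demoted return is instead passed through a pointer argument and
  // written back via insertSRetStores when the return is lowered.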
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to non-graphics functions are not implemented.
    if (!IsGraphics && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, CC);
    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the
      // frontend set up an input arg for a particular interpolation mode, but
      // nothing uses that input arg. Really we should have an earlier pass
      // that removes such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
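    // Reserving them here, before handleAssignments runs, keeps user
    // arguments from being assigned to the same VGPRs.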
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsGraphics);
  } else {
    if (!Subtarget.enableFlatScratch())
      CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}

bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
                                           CCState &CCInfo,
                                           SmallVectorImpl<std::pair<MCRegister, Register>> &ArgRegs,
                                           CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();

  const AMDGPUFunctionArgInfo *CalleeArgInfo
      = &AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUFunctionArgInfo &CallerArgInfo = MFI->getArgInfo();

  // TODO: Unify with private memory register handling. This is complicated by
  // the fact that at least in kernels, the input argument is not necessarily
  // in the same location as the input.
  AMDGPUFunctionArgInfo::PreloadedValue InputRegs[] = {
    AMDGPUFunctionArgInfo::DISPATCH_PTR,
    AMDGPUFunctionArgInfo::QUEUE_PTR,
    AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR,
    AMDGPUFunctionArgInfo::DISPATCH_ID,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_X,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Y,
    AMDGPUFunctionArgInfo::WORKGROUP_ID_Z
  };

  MachineRegisterInfo &MRI = MF.getRegInfo();

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const AMDGPULegalizerInfo *LI
      = static_cast<const AMDGPULegalizerInfo*>(ST.getLegalizerInfo());

  for (auto InputID : InputRegs) {
    const ArgDescriptor *OutgoingArg;
    const TargetRegisterClass *ArgRC;
    LLT ArgTy;

    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(InputID);
    if (!OutgoingArg)
      continue;

    const ArgDescriptor *IncomingArg;
    const TargetRegisterClass *IncomingArgRC;
    std::tie(IncomingArg, IncomingArgRC, ArgTy) =
        CallerArgInfo.getPreloadedValue(InputID);
    assert(IncomingArgRC == ArgRC);

    Register InputReg = MRI.createGenericVirtualRegister(ArgTy);

    if (IncomingArg) {
      LI->loadInputValue(InputReg, MIRBuilder, IncomingArg, ArgRC, ArgTy);
    } else {
      assert(InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
      LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
    }

    if (OutgoingArg->isRegister()) {
      ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
      if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
        report_fatal_error("failed to allocate implicit input argument");
    } else {
      LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
      return false;
    }
  }

  // Pack workitem IDs into a single register or pass it as is if already
  // packed.
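  // The packed layout places the X ID in bits [9:0], Y in bits [19:10] and Z
  // in bits [29:20], matching the shift amounts used below.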
  const ArgDescriptor *OutgoingArg;
  const TargetRegisterClass *ArgRC;
  LLT ArgTy;

  std::tie(OutgoingArg, ArgRC, ArgTy) =
      CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  if (!OutgoingArg)
    std::tie(OutgoingArg, ArgRC, ArgTy) =
        CalleeArgInfo->getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  if (!OutgoingArg)
    return false;

  auto WorkitemIDX =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  auto WorkitemIDY =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  auto WorkitemIDZ =
      CallerArgInfo.getPreloadedValue(AMDGPUFunctionArgInfo::WORKITEM_ID_Z);

  const ArgDescriptor *IncomingArgX = std::get<0>(WorkitemIDX);
  const ArgDescriptor *IncomingArgY = std::get<0>(WorkitemIDY);
  const ArgDescriptor *IncomingArgZ = std::get<0>(WorkitemIDZ);
  const LLT S32 = LLT::scalar(32);

  // If incoming ids are not packed we need to pack them.
  // FIXME: Should consider known workgroup size to eliminate known 0 cases.
  Register InputReg;
  if (IncomingArgX && !IncomingArgX->isMasked() && CalleeArgInfo->WorkItemIDX) {
    InputReg = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(InputReg, MIRBuilder, IncomingArgX,
                       std::get<1>(WorkitemIDX), std::get<2>(WorkitemIDX));
  }

  if (IncomingArgY && !IncomingArgY->isMasked() && CalleeArgInfo->WorkItemIDY) {
    Register Y = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Y, MIRBuilder, IncomingArgY, std::get<1>(WorkitemIDY),
                       std::get<2>(WorkitemIDY));

    Y = MIRBuilder.buildShl(S32, Y, MIRBuilder.buildConstant(S32, 10)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Y).getReg(0) : Y;
  }

  if (IncomingArgZ && !IncomingArgZ->isMasked() && CalleeArgInfo->WorkItemIDZ) {
    Register Z = MRI.createGenericVirtualRegister(S32);
    LI->loadInputValue(Z, MIRBuilder, IncomingArgZ, std::get<1>(WorkitemIDZ),
                       std::get<2>(WorkitemIDZ));

    Z = MIRBuilder.buildShl(S32, Z, MIRBuilder.buildConstant(S32, 20)).getReg(0);
    InputReg = InputReg ? MIRBuilder.buildOr(S32, InputReg, Z).getReg(0) : Z;
  }

  if (!InputReg) {
    InputReg = MRI.createGenericVirtualRegister(S32);

    // Workitem ids are already packed, so any of the present incoming
    // arguments will carry all required fields.
    ArgDescriptor IncomingArg = ArgDescriptor::createArg(
        IncomingArgX ? *IncomingArgX :
        IncomingArgY ? *IncomingArgY : *IncomingArgZ, ~0u);
    LI->loadInputValue(InputReg, MIRBuilder, &IncomingArg,
                       &AMDGPU::VGPR_32RegClass, S32);
  }

  if (OutgoingArg->isRegister()) {
    ArgRegs.emplace_back(OutgoingArg->getRegister(), InputReg);
    if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
      report_fatal_error("failed to allocate implicit input argument");
  } else {
    LLVM_DEBUG(dbgs() << "Unhandled stack passed implicit input argument\n");
    return false;
  }

  return true;
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn
/// for \p CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const SITargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  return AMDGPU::SI_CALL;
}

// Add operands to call instruction to track the callee.
static bool addCallTargetOperands(MachineInstrBuilder &CallInst,
                                  MachineIRBuilder &MIRBuilder,
                                  AMDGPUCallLowering::CallLoweringInfo &Info) {
  if (Info.Callee.isReg()) {
    CallInst.addReg(Info.Callee.getReg());
    CallInst.addImm(0);
  } else if (Info.Callee.isGlobal() && Info.Callee.getOffset() == 0) {
    // The call lowering assumed we can directly encode a call target in the
    // instruction, which is not the case. Materialize the address here.
    const GlobalValue *GV = Info.Callee.getGlobal();
    auto Ptr = MIRBuilder.buildGlobalValue(
        LLT::pointer(GV->getAddressSpace(), 64), GV);
    CallInst.addReg(Ptr.getReg(0));
    CallInst.add(Info.Callee);
  } else
    return false;

  return true;
}

bool AMDGPUCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                   CallLoweringInfo &Info) const {
  if (Info.IsVarArg) {
    LLVM_DEBUG(dbgs() << "Variadic functions not implemented\n");
    return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  const DataLayout &DL = F.getParent()->getDataLayout();
  CallingConv::ID CallConv = F.getCallingConv();

  if (!AMDGPUTargetMachine::EnableFixedFunctionABI &&
      CallConv != CallingConv::AMDGPU_Gfx) {
    LLVM_DEBUG(dbgs() << "Variable function ABI not implemented\n");
    return false;
  }

  if (AMDGPU::isShader(CallConv)) {
    LLVM_DEBUG(dbgs() << "Unhandled call from graphics shader\n");
    return false;
  }

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs)
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);

  SmallVector<ArgInfo, 8> InArgs;
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt = false;

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKUP)
      .addImm(0)
      .addImm(0);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.addDef(TRI->getReturnAddressReg(MF));

  if (!addCallTargetOperands(MIB, MIRBuilder, Info))
    return false;

  // Tell the call which registers are clobbered.
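  // Everything not preserved by the callee's calling convention is treated as
  // clobbered across the call.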
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  MIB.addRegMask(Mask);

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(Info.CallConv, Info.IsVarArg, MF, ArgLocs, F.getContext());

  // We could pass MIB and directly add the implicit uses to the call
  // now. However, as an aesthetic choice, place implicit argument operands
  // after the ordinary user argument registers.
  SmallVector<std::pair<MCRegister, Register>, 12> ImplicitArgRegs;

  if (AMDGPUTargetMachine::EnableFixedFunctionABI &&
      Info.CallConv != CallingConv::AMDGPU_Gfx) {
    // With a fixed ABI, allocate fixed registers before user arguments.
    if (!passSpecialInputs(MIRBuilder, CCInfo, ImplicitArgRegs, Info))
      return false;
  }

  // Do the actual argument marshalling.
  SmallVector<Register, 8> PhysRegs;
  AMDGPUOutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                                   AssignFnVarArg, false);
  if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, OutArgs, Handler))
    return false;

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  if (!ST.enableFlatScratch()) {
    // Insert copies for the SRD. In the HSA case, this should be an identity
    // copy.
    auto ScratchRSrcReg = MIRBuilder.buildCopy(LLT::vector(4, 32),
                                               MFI->getScratchRSrcReg());
    MIRBuilder.buildCopy(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, ScratchRSrcReg);
    MIB.addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Implicit);
  }

  for (std::pair<MCRegister, Register> ArgReg : ImplicitArgRegs) {
    MIRBuilder.buildCopy((Register)ArgReg.first, ArgReg.second);
    MIB.addReg(ArgReg.first, RegState::Implicit);
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.

  // FIXME: We should define regbankselectable call instructions to handle
  // divergent call targets.
  if (MIB->getOperand(1).isReg()) {
    MIB->getOperand(1).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *ST.getInstrInfo(),
        *ST.getRegBankInfo(), *MIB, MIB->getDesc(), MIB->getOperand(1),
        1));
  }

  // Now we can add the actual call instruction to the correct position.
  MIRBuilder.insertInstr(MIB);

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-def of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv,
                                                      Info.IsVarArg);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, InArgs, Handler, Info.CallConv,
                           Info.IsVarArg))
      return false;
  }

  uint64_t CalleePopBytes = NumBytes;

  MIRBuilder.buildInstr(AMDGPU::ADJCALLSTACKDOWN)
      .addImm(0)
      .addImm(CalleePopBytes);

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }

  return true;
}