//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the
    // value ends up in a VGPR.
    // FIXME: Assert this is a shader return.
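    // readfirstlane broadcasts the value of the first active lane into an
    // SGPR, so the copy below satisfies the SGPR register class constraint
    // even when the value was computed in a VGPR.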
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    unsigned Align = inferAlignmentFromPtrInfo(MF, MPO);

    // FIXME: Get alignment
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        Align);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call instruction).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
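  // Incoming handlers copy values from physical registers into virtual
  // registers; the generic assignment code uses this flag to tell the two
  // directions apart.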
  bool isIncomingArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

}

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

void AMDGPUCallLowering::splitToValueTypes(
    MachineIRBuilder &B,
    const ArgInfo &OrigArg, unsigned OrigArgIdx,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    Register Reg = OrigArg.Regs[SplitIdx];
    Type *Ty = VT.getTypeForEVT(Ctx);
    LLT LLTy = getLLTForType(*Ty, DL);

    if (OrigArgIdx == AttributeList::ReturnIndex && VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (OrigArg.Flags[0].isSExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (OrigArg.Flags[0].isZExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        VT = ExtVT;
        Ty = ExtVT.getTypeForEVT(Ctx);
        LLTy = getLLTForType(*Ty, DL);
        Reg = B.buildInstr(ExtendOp, {LLTy}, {Reg}).getReg(0);
      }
    }

    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    MVT RegVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g.
      // [1 x double] -> double).
      SplitArgs.emplace_back(Reg, Ty, OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    SmallVector<Register, 8> SplitRegs;
    Type *PartTy = EVT(RegVT).getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);
    MachineRegisterInfo &MRI = *B.getMRI();

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, Reg, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
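// For example, an s32 with a factor of 3 becomes s96, and a <2 x s16> with a
// factor of 2 becomes <4 x s16>.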
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 const CallLowering::ArgInfo &Info,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(),
                                         SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], Big, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
      B, OrigRetInfo, AttributeList::ReturnIndex, SplitRetInfos, DL, CC,
      [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
          int VTSplitIdx) {
        unpackRegsToOrigType(B, Regs, SrcReg,
                             SplitRetInfos[VTSplitIdx],
                             LLTy, PartLLT);
      });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
  OutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {

  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MODereferenceable |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
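  // These are allocated below in a fixed order: private segment buffer,
  // dispatch pointer, queue pointer, kernarg segment pointer, dispatch ID,
  // and flat scratch init.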
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
      OrigArgRegs.size() == 1
      ? OrigArgRegs[0]
      : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(B, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder mergeVectorRegsToResultRegs(
  MachineIRBuilder &B, ArrayRef<Register> DstRegs, ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
  Register Undef = B.buildUndef(PartLLT).getReg(0);

  // Build vector of undefs.
  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

  // Replace the first sources with the real registers.
  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());

  auto Widened = B.buildConcatVectors(LCMTy, WidenedSrcs);
  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, Widened);
}

// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    assert(LLTy.getElementType() == PartLLT.getElementType());
    mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register
  // types to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.
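    // e.g. a <4 x s32> value passed as four s32 parts is rebuilt with a
    // single G_BUILD_VECTOR.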
    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
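    // Returning false reports the failure so the GlobalISel framework can
    // fall back to SelectionDAG (or abort, depending on the fallback mode).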
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(
        B, OrigArg, OrigArgIdx, SplitArgs, DL, CC,
        // FIXME: We should probably be passing multiple registers to
        // handleAssignments to do this
        [&](ArrayRef<Register> Regs, Register DstReg,
            LLT LLTy, LLT PartLLT, int VTSplitIdx) {
          assert(DstReg == VRegs[Idx][VTSplitIdx]);
          packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                  LLTy, PartLLT);
        });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
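      // Apply the same check as above, but to the combined mask that the
      // hardware will actually see.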
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
          countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}