//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the value
    // ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
    : ValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    // FIXME: Get alignment
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in) and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isIncomingArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
    : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

// Break \p OrigArg into its constituent value types, and split each value into
// the register pieces required by \p CallConv. \p PerformArgSplit is invoked
// for each original value that needs more than one part register.
void AMDGPUCallLowering::splitToValueTypes(
  MachineIRBuilder &B,
  const ArgInfo &OrigArg, unsigned OrigArgIdx,
  SmallVectorImpl<ArgInfo> &SplitArgs,
  const DataLayout &DL, CallingConv::ID CallConv,
  SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    Register Reg = OrigArg.Regs[SplitIdx];
    Type *Ty = VT.getTypeForEVT(Ctx);
    LLT LLTy = getLLTForType(*Ty, DL);

    if (OrigArgIdx == AttributeList::ReturnIndex && VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (OrigArg.Flags[0].isSExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (OrigArg.Flags[0].isZExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        VT = ExtVT;
        Ty = ExtVT.getTypeForEVT(Ctx);
        LLTy = getLLTForType(*Ty, DL);
        Reg = B.buildInstr(ExtendOp, {LLTy}, {Reg}).getReg(0);
      }
    }

    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    MVT RegVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);

    if (NumParts == 1) {
      // Fixup EVTs to an MVT.
      //
      // FIXME: This is pretty hacky. Why do we have to split the type
      // legalization logic between here and handleAssignments?
      if (OrigArgIdx != AttributeList::ReturnIndex && VT != RegVT) {
        assert(VT.getSizeInBits() < 32 &&
               "unexpected illegal type");
        Ty = Type::getInt32Ty(Ctx);
        Register OrigReg = Reg;
        Reg = B.getMRI()->createGenericVirtualRegister(LLT::scalar(32));
        B.buildTrunc(OrigReg, Reg);
      }

      // No splitting to do, but we want to replace the original type (e.g. [1 x
      // double] -> double).
      SplitArgs.emplace_back(Reg, Ty, OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    SmallVector<Register, 8> SplitRegs;
    Type *PartTy = EVT(RegVT).getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);
    MachineRegisterInfo &MRI = *B.getMRI();

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, Reg, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// Split \p SrcReg (of type \p SrcTy) into the part registers \p DstRegs, each
// of type \p PartTy.
// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 const CallLowering::ArgInfo &Info,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(),
                                         SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], Big, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
    B, OrigRetInfo, AttributeList::ReturnIndex, SplitRetInfos, DL, CC,
    [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
        int VTSplitIdx) {
      unpackRegsToOrigType(B, Regs, SrcReg,
                           SplitRetInfos[VTSplitIdx],
                           LLTy, PartLLT);
    });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
  OutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

// Compute the constant-address-space pointer to the kernel argument of type
// \p ParamTy at byte offset \p Offset from the kernarg segment pointer.
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {

  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
}

// Load the kernel argument of type \p ParamTy at byte offset \p Offset into
// \p DstReg.
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, Type *ParamTy,
                                        uint64_t Offset, Align Alignment,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      TypeSize, Alignment);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
      OrigArgRegs.size() == 1
      ? OrigArgRegs[0]
      : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);
    lowerParameter(B, ArgTy, ArgOffset, Alignment, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder mergeVectorRegsToResultRegs(
  MachineIRBuilder &B, ArrayRef<Register> DstRegs, ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
  Register Undef = B.buildUndef(PartLLT).getReg(0);

  // Build vector of undefs.
  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

  // Replace the first sources with the real registers.
  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());

  auto Widened = B.buildConcatVectors(LCMTy, WidenedSrcs);
  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, Widened);
}

// Rebuild the original value registers \p OrigRegs (of type \p LLTy) from the
// part registers \p Regs (of type \p PartLLT) produced by argument splitting.
// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    assert(LLTy.getElementType() == PartLLT.getElementType());
    mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions are not implemented yet.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(
      B, OrigArg, OrigArgIdx, SplitArgs, DL, CC,
      // FIXME: We should probably be passing multiple registers to
      // handleAssignments to do this
      [&](ArrayRef<Register> Regs, Register DstReg,
          LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        assert(DstReg == VRegs[Idx][VTSplitIdx]);
        packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                LLTy, PartLLT);
      });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
          countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}