//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    unsigned Align = inferAlignmentFromPtrInfo(MF, MPO);

    // FIXME: Get alignment
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        Align);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the call).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isIncomingArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

/// Break \p OrigArg into its legalized value types for \p CallConv and append
/// the resulting arguments to \p SplitArgs. For values that must be passed in
/// multiple part registers, \p PerformArgSplit is invoked with the newly
/// created part registers so the caller can connect them back to the original
/// virtual register.
void AMDGPUCallLowering::splitToValueTypes(
    MachineIRBuilder &B,
    const ArgInfo &OrigArg, unsigned OrigArgIdx,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    Register Reg = OrigArg.Regs[SplitIdx];
    Type *Ty = VT.getTypeForEVT(Ctx);
    LLT LLTy = getLLTForType(*Ty, DL);

    if (OrigArgIdx == AttributeList::ReturnIndex && VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (OrigArg.Flags[0].isSExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (OrigArg.Flags[0].isZExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        VT = ExtVT;
        Ty = ExtVT.getTypeForEVT(Ctx);
        LLTy = getLLTForType(*Ty, DL);
        Reg = B.buildInstr(ExtendOp, {LLTy}, {Reg}).getReg(0);
      }
    }

    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    MVT RegVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g. [1 x
      // double] -> double).
      SplitArgs.emplace_back(Reg, Ty, OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    SmallVector<Register, 8> SplitRegs;
    Type *PartTy = EVT(RegVT).getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);
    MachineRegisterInfo &MRI = *B.getMRI();

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, Reg, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// Split SrcReg (of type SrcTy) into the part registers DstRegs (of type
// PartTy), e.g. unmerging an s64 value into two s32 parts, or extracting from
// a padded value when the part size does not evenly divide the source size.
// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 const CallLowering::ArgInfo &Info,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(),
                                         SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], Big, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
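/// The copies for the return value are emitted at \p B's current insertion
/// point, and the physical registers used are added to \p Ret as implicit
/// operands. Returns true on success.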
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
    B, OrigRetInfo, AttributeList::ReturnIndex, SplitRetInfos, DL, CC,
    [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
        int VTSplitIdx) {
      unpackRegsToOrigType(B, Regs, SrcReg,
                           SplitRetInfos[VTSplitIdx],
                           LLTy, PartLLT);
    });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
  OutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

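  // The return was built with buildInstrNoInsert; emit it now so that it
  // follows the copies that feed its operands.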
  B.insertInstr(Ret);
  return true;
}

/// Compute a pointer into the kernarg segment at byte offset \p Offset for a
/// kernel argument of type \p ParamTy.
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {

  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
}

/// Load a kernel argument of type \p ParamTy from the kernarg segment at byte
/// offset \p Offset into \p DstReg.
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MODereferenceable |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
      OrigArgRegs.size() == 1
      ? OrigArgRegs[0]
      : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(B, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder mergeVectorRegsToResultRegs(
  MachineIRBuilder &B, ArrayRef<Register> DstRegs, ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
  Register Undef = B.buildUndef(PartLLT).getReg(0);

  // Build vector of undefs.
  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

  // Replace the first sources with the real registers.
  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());

  auto Widened = B.buildConcatVectors(LCMTy, WidenedSrcs);
  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
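  // Only the first DstRegs.size() results are used; the rest exist so the
  // unmerge defines a register for every piece of the widened value.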
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, Widened);
}

// Rebuild the original registers OrigRegs (of type LLTy) from the split part
// registers Regs (of type PartLLT), e.g. reassembling a <2 x s64> vector from
// four s32 parts.
// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    B.buildMerge(OrigRegs[0], Regs);
    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    assert(LLTy.getElementType() == PartLLT.getElementType());
    mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions are not implemented.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(
      B, OrigArg, OrigArgIdx, SplitArgs, DL, CC,
      // FIXME: We should probably be passing multiple registers to
      // handleAssignments to do this
      [&](ArrayRef<Register> Regs, Register DstReg,
          LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        assert(DstReg == VRegs[Idx][VTSplitIdx]);
        packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                LLTy, PartLLT);
      });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
          countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}