//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Info.Flags, State);
  }
};

struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    Register AddrReg = MRI.createGenericVirtualRegister(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers.
      // We need to do a 32-bit copy, and truncate to avoid the verifier
      // complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
      : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
    : CallLowering(&TLI) {}

void AMDGPUCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    Type *Ty = VT.getTypeForEVT(Ctx);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g.
      // [1 x double] -> double).
      SplitArgs.emplace_back(OrigArg.Regs[SplitIdx], Ty, OrigArg.Flags,
                             OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    LLT LLTy = getLLTForType(*Ty, DL);

    SmallVector<Register, 8> SplitRegs;

    EVT PartVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
    Type *PartTy = PartVT.getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy,
                             OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
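// For example, with Factor == 2, s32 becomes s64 and <2 x s16> becomes
// <4 x s16>.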
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &MIRBuilder,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = MIRBuilder.buildUnmerge(SrcTy.getElementType(),
                                                  SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      MIRBuilder.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    MIRBuilder.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = MIRBuilder.buildUndef(BigTy);

  Register BigReg = MRI.createGenericVirtualRegister(BigTy);
  MIRBuilder.buildInsert(BigReg, ImpDef.getReg(0), SrcReg, 0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    MIRBuilder.buildExtract(DstRegs[i], BigReg, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
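///
/// The return value is split into legal, register-sized pieces
/// (splitToValueTypes + unpackRegsToOrigType); each piece is then copied into
/// the physical register assigned by the return calling convention, and those
/// registers are added as implicit uses of \p Ret.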
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                        const Value *Val,
                                        ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
      OrigRetInfo, SplitRetInfos, DL, MRI, CC,
      [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        unpackRegsToOrigType(MIRBuilder, Regs, VRegs[VTSplitIdx], LLTy,
                             PartLLT);
      });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret, AssignFn);
  return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler);
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = MIRBuilder.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    MIRBuilder.buildInstr(AMDGPU::S_ENDPGM)
        .addImm(0);
    return true;
  }

  auto const &ST = MIRBuilder.getMF().getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = MIRBuilder.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    MIRBuilder.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.
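
  // Ret was created with buildInstrNoInsert so that the copies feeding the
  // return value and the return address could be emitted ahead of it; insert
  // it only now that all of its operands have been added.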
  MIRBuilder.insertInstr(Ret);
  return true;
}

Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               uint64_t Offset) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register DstReg = MRI.createGenericVirtualRegister(PtrType);
  Register KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, uint64_t Offset,
                                        unsigned Align,
                                        Register DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo,
                              MachineMemOperand::MOLoad |
                                  MachineMemOperand::MODereferenceable |
                                  MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &MIRBuilder,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
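  // The user SGPRs are added in a fixed order. Each add* call below hands out
  // the next available SGPRs and records them in SIMachineFunctionInfo; the
  // registers are also reserved in CCInfo so the calling convention does not
  // assign them to explicit arguments.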
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, MIRBuilder, MF, *TRI, *Info);

  unsigned i = 0;
  const unsigned KernArgBaseAlign = 16;
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    unsigned ABIAlign = DL.getABITypeAlignment(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
        OrigArgRegs.size() == 1 ?
            OrigArgRegs[0] :
            MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));
    unsigned Align = MinAlign(KernArgBaseAlign, ArgOffset);
    ArgOffset = alignTo(ArgOffset, DL.getABITypeAlignment(ArgTy));
    lowerParameter(MIRBuilder, ArgTy, ArgOffset, Align, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, MIRBuilder);
    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &MIRBuilder,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    MIRBuilder.buildMerge(OrigRegs[0], Regs);
    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(LLTy.getElementType() == PartLLT.getElementType());

    int DstElts = LLTy.getNumElements();
    int PartElts = PartLLT.getNumElements();
    if (DstElts % PartElts == 0)
      MIRBuilder.buildConcatVectors(OrigRegs[0], Regs);
    else {
      // Deal with v3s16 split into v2s16
      assert(PartElts == 2 && DstElts % 2 != 0);
      int RoundedElts = PartElts * ((DstElts + PartElts - 1) / PartElts);

      LLT RoundedDestTy = LLT::vector(RoundedElts, PartLLT.getElementType());
      auto RoundedConcat = MIRBuilder.buildConcatVectors(RoundedDestTy, Regs);
      MIRBuilder.buildExtract(OrigRegs[0], RoundedConcat, 0);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();
  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.
    MIRBuilder.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = MIRBuilder.buildMerge(DstEltTy,
                                         Regs.take_front(PartsPerElt));
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    MIRBuilder.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = MIRBuilder.buildBuildVector(BVType, Regs);
    MIRBuilder.buildTrunc(OrigRegs[0], BV);
  }
}

bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(MIRBuilder, F, VRegs);

  // AMDGPU_GS and AMDGPU_HS are not supported yet.
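  // Returning false here, or for the unsupported argument kinds below, makes
  // GlobalISel abort or fall back to the SelectionDAG path for this function,
  // depending on the global-isel-abort setting.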
  if (CC == CallingConv::AMDGPU_GS || CC == CallingConv::AMDGPU_HS)
    return false;

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    MIRBuilder.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions are not implemented yet.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          MIRBuilder.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(
        OrigArg, SplitArgs, DL, MRI, CC,
        // FIXME: We should probably be passing multiple registers to
        // handleAssignments to do this
        [&](ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT, int VTSplitIdx) {
          packSplitRegsToOrigType(MIRBuilder, VRegs[Idx][VTSplitIdx], Regs,
                                  LLTy, PartLLT);
        });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
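  //
  // Note that this workaround mirrors the equivalent handling in the
  // SelectionDAG path (SITargetLowering::LowerFormalArguments).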
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, MIRBuilder, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}