//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPU.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI), AMDGPUASI(TLI.getAMDGPUAS()) {
}

bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, unsigned VReg) const {
  // FIXME: Add support for non-void returns.
  if (Val)
    return false;

  MIRBuilder.buildInstr(AMDGPU::S_ENDPGM);
  return true;
}

// Build a pointer to the kernel argument at \p Offset: a constant address
// space GEP of \p Offset bytes off the preloaded kernarg segment pointer.
unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder,
                                               Type *ParamTy,
                                               unsigned Offset) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  unsigned DstReg = MRI.createGenericVirtualRegister(PtrType);
  unsigned KernArgSegmentPtr =
      MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  unsigned KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
  MIRBuilder.buildConstant(OffsetReg, Offset);

  MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg);

  return DstReg;
}

// Load a kernel argument of type \p ParamTy at \p Offset bytes into the
// kernarg segment into \p DstReg. Kernarg loads are invariant.
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
                                        Type *ParamTy, unsigned Offset,
                                        unsigned DstReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  unsigned Align = DL.getABITypeAlignment(ParamTy);
  unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
                                       MachineMemOperand::MONonTemporal |
                                       MachineMemOperand::MOInvariant,
                              TypeSize, Align);

  MIRBuilder.buildLoad(DstReg, PtrReg, *MMO);
}

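// Lower the incoming formal arguments. Kernel arguments are loaded from the
// kernarg segment, while shader calling conventions (e.g. AMDGPU_VS,
// AMDGPU_PS) receive their arguments preloaded in registers.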
bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                              const Function &F,
                                              ArrayRef<unsigned> VRegs) const {
  // AMDGPU_GS and AMDGPU_HS are not supported yet.
  if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
      F.getCallingConv() == CallingConv::AMDGPU_HS)
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const SISubtarget *Subtarget =
      static_cast<const SISubtarget *>(&MF.getSubtarget());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = MF.getSubtarget<SISubtarget>().getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  // Reserve the special input registers preloaded into SGPRs so the calling
  // convention does not hand them out to user arguments.
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    const LLT P2 = LLT::pointer(AMDGPUASI.CONSTANT_ADDRESS, 64);
    unsigned VReg = MRI.createGenericVirtualRegister(P2);
    MRI.addLiveIn(InputPtrReg, VReg);
    MIRBuilder.getMBB().addLiveIn(InputPtrReg);
    MIRBuilder.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    // FIXME: Need to add reg as live-in
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

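  // Let the calling convention decide where each IR argument lives. Unused
  // AMDGPU_PS inputs can be skipped entirely so they are never enabled or
  // allocated a register.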
  unsigned NumArgs = F.arg_size();
  Function::const_arg_iterator CurOrigArg = F.arg_begin();
  const AMDGPUTargetLowering &TLI = *getTLI<AMDGPUTargetLowering>();
  unsigned PSInputNum = 0;
  BitVector Skipped(NumArgs);
  for (unsigned i = 0; i != NumArgs; ++i, ++CurOrigArg) {
    EVT ValEVT = TLI.getValueType(DL, CurOrigArg->getType());

    ArgInfo OrigArg{VRegs[i], CurOrigArg->getType()};
    setArgFlags(OrigArg, i + 1, DL, F);
    OrigArg.Flags.setOrigAlign(DL.getABITypeAlignment(CurOrigArg->getType()));

    if (F.getCallingConv() == CallingConv::AMDGPU_PS &&
        !OrigArg.Flags.isInReg() && !OrigArg.Flags.isByVal() &&
        PSInputNum <= 15) {
      if (CurOrigArg->use_empty() && !Info->isPSInputAllocated(PSInputNum)) {
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (!CurOrigArg->use_empty())
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    CCAssignFn *AssignFn = CCAssignFnForCall(F.getCallingConv(),
                                             /*IsVarArg=*/false);

    // We can only handle simple value types at the moment.
    if (ValEVT.isVector()) {
      if (!ValEVT.isSimple())
        return false;
      // Assign vectors by their element type for now.
      MVT ValVT = ValEVT.getVectorElementType().getSimpleVT();
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full,
                          OrigArg.Flags, CCInfo);
      // Fail if we don't know how to handle this type; CC assignment
      // functions return true on failure.
      if (Res)
        return false;
    } else {
      if (!ValEVT.isSimple())
        return false;
      MVT ValVT = ValEVT.getSimpleVT();
      bool Res =
          AssignFn(i, ValVT, ValVT, CCValAssign::Full, OrigArg.Flags, CCInfo);

      // Fail if we don't know how to handle this type.
      if (Res)
        return false;
    }
  }

  Function::const_arg_iterator Arg = F.arg_begin();

  if (F.getCallingConv() == CallingConv::AMDGPU_VS ||
      F.getCallingConv() == CallingConv::AMDGPU_PS) {
    // Shader arguments arrive in registers: copy each assigned location into
    // its virtual register, skipping unused PS inputs.
    for (unsigned i = 0, OrigArgIdx = 0;
         OrigArgIdx != NumArgs && i != ArgLocs.size(); ++Arg, ++OrigArgIdx) {
      if (Skipped.test(OrigArgIdx))
        continue;
      CCValAssign &VA = ArgLocs[i++];
      MRI.addLiveIn(VA.getLocReg(), VRegs[OrigArgIdx]);
      MIRBuilder.getMBB().addLiveIn(VA.getLocReg());
      MIRBuilder.buildCopy(VRegs[OrigArgIdx], VA.getLocReg());
    }
    return true;
  }

  // Kernel arguments are loaded from the kernarg segment.
  for (unsigned i = 0; i != ArgLocs.size(); ++i, ++Arg) {
    // FIXME: We should be getting DebugInfo from the arguments somehow.
    CCValAssign &VA = ArgLocs[i];
    lowerParameter(MIRBuilder, Arg->getType(),
                   VA.getLocMemOffset() +
                       Subtarget->getExplicitKernelArgOffset(F),
                   VRegs[i]);
  }

  return true;
}