//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    // We don't currently support swiftself args.
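    // (swifterror is threaded through via SwiftErrorVReg above, but swiftself
    // would need a dedicated register on supporting targets, so bail out and
    // let the GlobalISel fallback path handle such calls.)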
    if (OrigArg.Flags.isSwiftSelf())
      return false;
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (const Function *F = CS.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResRegs, CS.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);

  const MDNode *KnownCallees =
      CS.getInstruction()->getMetadata(LLVMContext::MD_callees);

  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs,
                   SwiftErrorVReg, KnownCallees);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from the frontend. The backend
    // will guess if this info is not there, but there are cases it cannot
    // get right.
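    // Note: OpIdx is an AttributeList index (ArgNo + FirstArgIndex was passed
    // in by the callers above), so convert it back to a parameter number
    // before querying the parameter alignment.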
    unsigned FrameAlign;
    if (unsigned ParamAlign =
            FuncInfo.getParamAlignment(OpIdx - AttributeList::FirstArgIndex))
      FrameAlign = ParamAlign;
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                          CCInfo)) {
      // Try to use the register type if we couldn't assign the VT.
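      // (The assignment function may only recognize the legalized register
      // type for this calling convention, e.g. an illegal vector type that
      // is passed promoted or widened, rather than the original IR type.)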
      if (!Handler.isArgumentHandler() || !CurVT.isValid())
        return false;
      CurVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));
      if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i],
                            CCInfo))
        return false;
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    assert(Args[i].Regs.size() == 1 &&
           "Can't handle multiple virtual regs yet");

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    if (VA.isRegLoc()) {
      MVT OrigVT = MVT::getVT(Args[i].Ty);
      MVT VAVT = VA.getValVT();
      if (Handler.isArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits())
          return false; // Can't handle this type of arg yet.
        const LLT VATy(VAVT);
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          const LLT OrigTy(OrigVT);
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts\n");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      MVT VT = MVT::getVT(Args[i].Ty);
      unsigned Size = VT == MVT::iPTR ? DL.getPointerSize()
                                      : alignTo(VT.getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(ArgReg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
    return ValReg;
  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
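    // For now assume the conversion is a no-op and reuse the incoming
    // register unchanged.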
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}