//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
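  //
  // Purely illustrative IR (names invented, not taken from a test case):
  //   %r = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend
  //                          to i8* (i8*, i8*)*)(i8* %obj, i8* %sel)
  // Here the called operand is a ConstantExpr bitcast; stripPointerCasts()
  // below looks through it, so we can emit a direct call to @objc_msgSend
  // rather than an indirect call through a virtual register.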
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
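    //
    // For example (hypothetical IR, for illustration only): given a parameter
    //   %struct.S* byval(%struct.S) align 8 %s
    // getParamAlign() below yields Align(8); if the frontend omitted the
    // align attribute, it yields None and we fall back to the target's
    // guess from getByValTypeAlignment().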
    // Note: OpIdx is an attribute-list index (return value at index 0, first
    // argument at FirstArgIndex), while getParamAlign expects a 0-based
    // argument number, so translate between the two here.
    Align FrameAlign;
    if (auto ParamAlign =
            FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (CurVT.isSimple() &&
        !Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                           CCValAssign::Full, Args[i], Args[i].Flags[0],
                           CCInfo))
      continue;

    MVT NewVT = TLI->getRegisterTypeForCallingConv(
        F.getContext(), F.getCallingConv(), EVT(CurVT));

    // If we need to split the type over multiple regs, check that it's a
    // scenario we currently support.
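    //
    // As a sketch of the expected values (assumed, not taken from a test):
    // an i128 argument on a 64-bit target typically reports NewVT == i64 and
    // NumParts == 2, an exact split (2 x 64 == 128) that we can handle; a
    // type whose parts don't multiply out to its full width is rejected.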
    unsigned NumParts = TLI->getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), CurVT);
    if (NumParts > 1) {
      // For now only handle exact splits.
      if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
        return false;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.
    if (Handler.isIncomingArgumentHandler()) {
      if (NumParts == 1) {
        // Try to use the register type if we couldn't assign the VT.
        if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                              Args[i].Flags[0], CCInfo))
          return false;
      } else {
        // We're handling an incoming arg which is split over multiple regs.
        // E.g. passing an s128 on AArch64.
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        Args[i].OrigRegs.push_back(Args[i].Regs[0]);
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        LLT NewLLT = getLLTForMVT(NewVT);
        // For each split register, create and assign a vreg that will store
        // the incoming component of the larger value. These will later be
        // merged to form the final vreg.
        for (unsigned Part = 0; Part < NumParts; ++Part) {
          Register Reg =
              MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (Part == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (Part == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Reg);
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[Part], CCInfo)) {
            // Still couldn't assign this smaller part type for some reason.
            return false;
          }
        }
      }
    } else {
      // Handling an outgoing arg that might need to be split.
      if (NumParts < 2)
        return false; // Don't know how to deal with this type combination.

      // This type is passed via multiple registers in the calling convention.
      // We need to extract the individual parts.
      Register LargeReg = Args[i].Regs[0];
      LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
      auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
      assert(Unmerge->getNumOperands() == NumParts + 1);
      ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
      // We're going to replace the regs and flags with the split ones.
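      //
      // Sketch of the resulting MIR (virtual register names invented): for
      // an outgoing s128 split into two s64 parts, the G_UNMERGE_VALUES
      // built above is
      //   %lo:_(s64), %hi:_(s64) = G_UNMERGE_VALUES %val:_(s128)
      // and %lo/%hi become the per-part registers assigned below.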
      Args[i].Regs.clear();
      Args[i].Flags.clear();
      for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
        ISD::ArgFlagsTy Flags = OrigFlags;
        if (PartIdx == 0) {
          Flags.setSplit();
        } else {
          Flags.setOrigAlign(Align(1));
          if (PartIdx == NumParts - 1)
            Flags.setSplitEnd();
        }
        Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
        Args[i].Flags.push_back(Flags);
        if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                              Args[i], Args[i].Flags[PartIdx], CCInfo))
          return false;
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    EVT OrigVT = EVT::getEVT(Args[i].Ty);
    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    unsigned NumArgRegs = Args[i].Regs.size();

    assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");
    for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
      // There should be Regs.size() ArgLocs per argument.
      VA = ArgLocs[j + Part];
      if (VA.isMemLoc()) {
        // Don't currently support loading/storing a type that needs to be
        // split to the stack. Should be easy, just not implemented yet.
        if (NumArgRegs > 1) {
          LLVM_DEBUG(dbgs() << "Load/store a split arg to/from the stack not "
                               "implemented yet\n");
          return false;
        }

        // FIXME: Use correct address space for pointer size
        EVT LocVT = VA.getValVT();
        unsigned MemSize =
            LocVT == MVT::iPTR ? DL.getPointerSize() : LocVT.getStoreSize();
        unsigned Offset = VA.getLocMemOffset();
        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
        Handler.assignValueToAddress(Args[i], StackAddr, MemSize, MPO, VA);
        continue;
      }

      assert(VA.isRegLoc() && "custom loc should have been handled already");

      if (OrigVT.getSizeInBits() >= VAVT.getSizeInBits() ||
          !Handler.isIncomingArgumentHandler()) {
        // This is an argument that might have been split. There should be
        // Regs.size() ArgLocs per argument.

        // Insert the argument copies. If VAVT < OrigVT, we'll insert the
        // merge to the original register after handling all of the parts.
        Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        continue;
      }

      // This ArgLoc covers multiple pieces, so we need to split it.
      const LLT VATy(VAVT.getSimpleVT());
      Register NewReg = MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
      Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
      // If it's a vector type, we either need to truncate the elements
      // or do an unmerge to get the lower block of elements.
      if (VATy.isVector() &&
          VATy.getNumElements() > OrigVT.getVectorNumElements()) {
        // Just handle the case where the VA type is 2 * original type.
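        //
        // E.g. (shapes assumed for illustration): an incoming <2 x s32>
        // promoted by the CC to <4 x s32> is unmerged into two <2 x s32>
        // halves and the low half is copied out; a promotion to <8 x s32>
        // (4x the original element count) is rejected below.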
        if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
          LLVM_DEBUG(dbgs()
                     << "Incoming promoted vector arg has too many elts\n");
          return false;
        }
        auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
        MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
      } else {
        MIRBuilder.buildTrunc(ArgReg, {NewReg});
      }
    }

    // Now that all pieces have been handled, re-pack any arguments into any
    // wider, original registers.
    if (Handler.isIncomingArgumentHandler()) {
      if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
        assert(NumArgRegs >= 2);

        // Merge the split registers into the expected larger result vreg
        // of the original call.
        MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
      }
    }

    j += NumArgRegs - 1;
  }

  return true;
}

bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
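    //
    // Note that only the stack offset is compared here; this mirrors the
    // register case above, where we only require the assigned location to
    // line up between the caller's and callee's conventions.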
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}