1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
16 #include "llvm/CodeGen/GlobalISel/Utils.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Target/TargetMachine.h"
26 
27 #define DEBUG_TYPE "call-lowering"
28 
29 using namespace llvm;
30 
31 void CallLowering::anchor() {}
32 
33 /// Helper function which updates \p Flags when \p AttrFn returns true.
34 static void
35 addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
36                     const std::function<bool(Attribute::AttrKind)> &AttrFn) {
37   if (AttrFn(Attribute::SExt))
38     Flags.setSExt();
39   if (AttrFn(Attribute::ZExt))
40     Flags.setZExt();
41   if (AttrFn(Attribute::InReg))
42     Flags.setInReg();
43   if (AttrFn(Attribute::StructRet))
44     Flags.setSRet();
45   if (AttrFn(Attribute::Nest))
46     Flags.setNest();
47   if (AttrFn(Attribute::ByVal))
48     Flags.setByVal();
49   if (AttrFn(Attribute::Preallocated))
50     Flags.setPreallocated();
51   if (AttrFn(Attribute::InAlloca))
52     Flags.setInAlloca();
53   if (AttrFn(Attribute::Returned))
54     Flags.setReturned();
55   if (AttrFn(Attribute::SwiftSelf))
56     Flags.setSwiftSelf();
57   if (AttrFn(Attribute::SwiftError))
58     Flags.setSwiftError();
59 }
60 
61 ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
62                                                      unsigned ArgIdx) const {
63   ISD::ArgFlagsTy Flags;
64   addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
65     return Call.paramHasAttr(ArgIdx, Attr);
66   });
67   return Flags;
68 }
69 
70 void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
71                                              const AttributeList &Attrs,
72                                              unsigned OpIdx) const {
73   addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
74     return Attrs.hasAttribute(OpIdx, Attr);
75   });
76 }
77 
78 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
79                              ArrayRef<Register> ResRegs,
80                              ArrayRef<ArrayRef<Register>> ArgRegs,
81                              Register SwiftErrorVReg,
82                              std::function<unsigned()> GetCalleeReg) const {
83   CallLoweringInfo Info;
84   const DataLayout &DL = MIRBuilder.getDataLayout();
85   MachineFunction &MF = MIRBuilder.getMF();
86   bool CanBeTailCalled = CB.isTailCall() &&
87                          isInTailCallPosition(CB, MF.getTarget()) &&
88                          (MF.getFunction()
89                               .getFnAttribute("disable-tail-calls")
90                               .getValueAsString() != "true");
91 
92   CallingConv::ID CallConv = CB.getCallingConv();
93   Type *RetTy = CB.getType();
94   bool IsVarArg = CB.getFunctionType()->isVarArg();
95 
96   SmallVector<BaseArgInfo, 4> SplitArgs;
97   getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
98   Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
99 
100   if (!Info.CanLowerReturn) {
101     // Callee requires sret demotion.
102     insertSRetOutgoingArgument(MIRBuilder, CB, Info);
103 
104     // The sret demotion isn't compatible with tail-calls, since the sret
105     // argument points into the caller's stack frame.
106     CanBeTailCalled = false;
107   }
108 
  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
112   unsigned i = 0;
113   unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
114   for (auto &Arg : CB.args()) {
115     ArgInfo OrigArg{ArgRegs[i], *Arg.get(), getAttributesForArgIdx(CB, i),
116                     i < NumFixedArgs};
117     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
118 
    // If we have an explicit sret argument that is an Instruction (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
121     if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
122       CanBeTailCalled = false;
123 
124     Info.OrigArgs.push_back(OrigArg);
125     ++i;
126   }
127 
128   // Try looking through a bitcast from one function type to another.
129   // Commonly happens with calls to objc_msgSend().
130   const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
131   if (const Function *F = dyn_cast<Function>(CalleeV))
132     Info.Callee = MachineOperand::CreateGA(F, 0);
133   else
134     Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
135 
136   Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}};
137   if (!Info.OrigRet.Ty->isVoidTy())
138     setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
139 
140   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
141   Info.CallConv = CallConv;
142   Info.SwiftErrorVReg = SwiftErrorVReg;
143   Info.IsMustTailCall = CB.isMustTailCall();
144   Info.IsTailCall = CanBeTailCalled;
145   Info.IsVarArg = IsVarArg;
146   return lowerCall(MIRBuilder, Info);
147 }
148 
149 template <typename FuncInfoTy>
150 void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
151                                const DataLayout &DL,
152                                const FuncInfoTy &FuncInfo) const {
153   auto &Flags = Arg.Flags[0];
154   const AttributeList &Attrs = FuncInfo.getAttributes();
155   addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
156 
157   Align MemAlign = DL.getABITypeAlign(Arg.Ty);
158   if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
159     assert(OpIdx >= AttributeList::FirstArgIndex);
160     Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
161 
162     auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
163     Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
164 
    // For ByVal, alignment should be passed from the frontend. The backend
    // will guess if this info is missing, but there are cases it cannot get
    // right.
167     if (auto ParamAlign =
168             FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
169       MemAlign = *ParamAlign;
170     else if ((ParamAlign =
171                   FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex)))
172       MemAlign = *ParamAlign;
173     else
174       MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
175   } else if (OpIdx >= AttributeList::FirstArgIndex) {
176     if (auto ParamAlign =
177             FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
178       MemAlign = *ParamAlign;
179   }
180   Flags.setMemAlign(MemAlign);
181   Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
182 
  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in the expected return register
  // (e.g. x0 on AArch64).
185   if (Flags.isSwiftSelf())
186     Flags.setReturned(false);
187 }
188 
189 template void
190 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
191                                     const DataLayout &DL,
192                                     const Function &FuncInfo) const;
193 
194 template void
195 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
196                                     const DataLayout &DL,
197                                     const CallBase &FuncInfo) const;
198 
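/// Break \p OrigArg into one ArgInfo per value type produced by
/// ComputeValueVTs. As an illustrative sketch, an IR argument of type
/// {double, i32} splits into two ArgInfos, one of type double and one of type
/// i32, each taking one of the original argument's virtual registers and
/// inheriting its flags.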
199 void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
200                                      SmallVectorImpl<ArgInfo> &SplitArgs,
201                                      const DataLayout &DL,
202                                      CallingConv::ID CallConv) const {
203   LLVMContext &Ctx = OrigArg.Ty->getContext();
204 
205   SmallVector<EVT, 4> SplitVTs;
206   SmallVector<uint64_t, 4> Offsets;
207   ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
208 
  if (SplitVTs.empty())
210     return;
211 
212   if (SplitVTs.size() == 1) {
213     // No splitting to do, but we want to replace the original type (e.g. [1 x
214     // double] -> double).
215     SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
216                            OrigArg.Flags[0], OrigArg.IsFixed,
217                            OrigArg.OrigValue);
218     return;
219   }
220 
221   // Create one ArgInfo for each virtual register in the original ArgInfo.
222   assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
223 
224   bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
225       OrigArg.Ty, CallConv, false);
226   for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
227     Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
228     SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
229                            OrigArg.IsFixed);
230     if (NeedsRegBlock)
231       SplitArgs.back().Flags[0].setInConsecutiveRegs();
232   }
233 
234   SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
235 }
236 
237 void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
238                               Type *PackedTy,
239                               MachineIRBuilder &MIRBuilder) const {
240   assert(DstRegs.size() > 1 && "Nothing to unpack");
241 
242   const DataLayout &DL = MIRBuilder.getDataLayout();
243 
244   SmallVector<LLT, 8> LLTs;
245   SmallVector<uint64_t, 8> Offsets;
246   computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
247   assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");
248 
249   for (unsigned i = 0; i < DstRegs.size(); ++i)
250     MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
251 }
252 
/// Merge the values in \p SrcRegs to cover the vector-typed result registers
/// \p DstRegs, padding with undef values where needed.
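///
/// As an illustrative sketch (register names assumed), a <4 x s16> result
/// built from two <2 x s16> pieces needs no padding and becomes a single
/// concat:
///
///   %dst:_(<4 x s16>) = G_CONCAT_VECTORS %src0(<2 x s16>), %src1(<2 x s16>)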
254 static MachineInstrBuilder
255 mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
256                             ArrayRef<Register> SrcRegs) {
257   MachineRegisterInfo &MRI = *B.getMRI();
258   LLT LLTy = MRI.getType(DstRegs[0]);
259   LLT PartLLT = MRI.getType(SrcRegs[0]);
260 
261   // Deal with v3s16 split into v2s16
262   LLT LCMTy = getLCMType(LLTy, PartLLT);
263   if (LCMTy == LLTy) {
264     // Common case where no padding is needed.
265     assert(DstRegs.size() == 1);
266     return B.buildConcatVectors(DstRegs[0], SrcRegs);
267   }
268 
269   // We need to create an unmerge to the result registers, which may require
270   // widening the original value.
271   Register UnmergeSrcReg;
272   if (LCMTy != PartLLT) {
273     // e.g. A <3 x s16> value was split to <2 x s16>
    // %reg_value0:_(<2 x s16>)
    // %reg_value1:_(<2 x s16>)
    // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
    // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
    // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
279     const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
280     Register Undef = B.buildUndef(PartLLT).getReg(0);
281 
282     // Build vector of undefs.
283     SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
284 
285     // Replace the first sources with the real registers.
286     std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
287     UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
288   } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector, e.g. s8 -> v4s8 -> s8.
291     assert(SrcRegs.size() == 1);
292     UnmergeSrcReg = SrcRegs[0];
293   }
294 
295   int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
296 
297   SmallVector<Register, 8> PadDstRegs(NumDst);
298   std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
299 
300   // Create the excess dead defs for the unmerge.
301   for (int I = DstRegs.size(); I != NumDst; ++I)
302     PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
303 
304   return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
305 }
306 
307 /// Create a sequence of instructions to combine pieces split into register
308 /// typed values to the original IR value. \p OrigRegs contains the destination
309 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
310 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
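///
/// As an illustrative sketch (register names assumed; the physregs are 32-bit
/// ARM's, for concreteness), an incoming s64 value passed as two s32 parts is
/// re-packed with a single merge:
///
///   %part0:_(s32) = COPY $r0
///   %part1:_(s32) = COPY $r1
///   %orig:_(s64) = G_MERGE_VALUES %part0(s32), %part1(s32)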
311 static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
312                               ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
313                               const ISD::ArgFlagsTy Flags) {
314   MachineRegisterInfo &MRI = *B.getMRI();
315 
316   if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register and just
    // assigned the value directly here.
319     assert(OrigRegs[0] == Regs[0]);
320     return;
321   }
322 
323   if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
324       Regs.size() == 1) {
325     B.buildBitcast(OrigRegs[0], Regs[0]);
326     return;
327   }
328 
329   if (PartLLT.isVector() == LLTy.isVector() &&
330       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
331       OrigRegs.size() == 1 && Regs.size() == 1) {
332     Register SrcReg = Regs[0];
333 
334     LLT LocTy = MRI.getType(SrcReg);
335 
336     if (Flags.isSExt()) {
337       SrcReg = B.buildAssertSExt(LocTy, SrcReg,
338                                  LLTy.getScalarSizeInBits()).getReg(0);
339     } else if (Flags.isZExt()) {
340       SrcReg = B.buildAssertZExt(LocTy, SrcReg,
341                                  LLTy.getScalarSizeInBits()).getReg(0);
342     }
343 
344     B.buildTrunc(OrigRegs[0], SrcReg);
345     return;
346   }
347 
348   if (!LLTy.isVector() && !PartLLT.isVector()) {
349     assert(OrigRegs.size() == 1);
350     LLT OrigTy = MRI.getType(OrigRegs[0]);
351 
352     unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits()) {
      B.buildMerge(OrigRegs[0], Regs);
    } else {
356       auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
357       B.buildTrunc(OrigRegs[0], Widened);
358     }
359 
360     return;
361   }
362 
363   if (PartLLT.isVector()) {
364     assert(OrigRegs.size() == 1);
365 
366     if (LLTy.getScalarType() == PartLLT.getElementType()) {
367       mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
368     } else {
369       SmallVector<Register> CastRegs(Regs.size());
370       unsigned I = 0;
371       LLT GCDTy = getGCDType(LLTy, PartLLT);
372 
      // We are both splitting a vector and bitcasting its element types. Cast
      // the source pieces into the appropriate number of pieces with the
      // result element type.
376       for (Register SrcReg : Regs)
377         CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
378       mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
379     }
380 
381     return;
382   }
383 
384   assert(LLTy.isVector() && !PartLLT.isVector());
385 
386   LLT DstEltTy = LLTy.getElementType();
387 
388   // Pointer information was discarded. We'll need to coerce some register types
389   // to avoid violating type constraints.
390   LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
391 
392   assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
393 
394   if (DstEltTy == PartLLT) {
395     // Vector was trivially scalarized.
396 
397     if (RealDstEltTy.isPointer()) {
398       for (Register Reg : Regs)
399         MRI.setType(Reg, RealDstEltTy);
400     }
401 
402     B.buildBuildVector(OrigRegs[0], Regs);
403   } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
404     // Deal with vector with 64-bit elements decomposed to 32-bit
405     // registers. Need to create intermediate 64-bit elements.
406     SmallVector<Register, 8> EltMerges;
407     int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
408 
409     assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
410 
411     for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
412       auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
413       // Fix the type in case this is really a vector of pointers.
414       MRI.setType(Merge.getReg(0), RealDstEltTy);
415       EltMerges.push_back(Merge.getReg(0));
416       Regs = Regs.drop_front(PartsPerElt);
417     }
418 
419     B.buildBuildVector(OrigRegs[0], EltMerges);
420   } else {
421     // Vector was split, and elements promoted to a wider type.
422     // FIXME: Should handle floating point promotions.
423     LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
424     auto BV = B.buildBuildVector(BVType, Regs);
425     B.buildTrunc(OrigRegs[0], BV);
426   }
427 }
428 
/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp is
/// the opcode used to extend scalar values when needed.
///
/// This is used for outgoing values (vregs to physregs).
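///
/// As an illustrative sketch (register names assumed), an outgoing s64 value
/// split into two s32 parts divides evenly, so it becomes a single unmerge:
///
///   %part0:_(s32), %part1:_(s32) = G_UNMERGE_VALUES %src(s64)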
434 static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
435                             Register SrcReg, LLT SrcTy, LLT PartTy,
436                             unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
437   // We could just insert a regular copy, but this is unreachable at the moment.
438   assert(SrcTy != PartTy && "identical part types shouldn't reach here");
439 
440   const unsigned PartSize = PartTy.getSizeInBits();
441 
442   if (PartTy.isVector() == SrcTy.isVector() &&
443       PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
444     assert(DstRegs.size() == 1);
445     B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
446     return;
447   }
448 
449   if (SrcTy.isVector() && !PartTy.isVector() &&
450       PartSize > SrcTy.getElementType().getSizeInBits()) {
451     // Vector was scalarized, and the elements extended.
452     auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
453     for (int i = 0, e = DstRegs.size(); i != e; ++i)
454       B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
455     return;
456   }
457 
458   LLT GCDTy = getGCDType(SrcTy, PartTy);
459   if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
461     B.buildUnmerge(DstRegs, SrcReg);
462     return;
463   }
464 
465   MachineRegisterInfo &MRI = *B.getMRI();
466   LLT DstTy = MRI.getType(DstRegs[0]);
467   LLT LCMTy = getLCMType(SrcTy, PartTy);
468 
469   const unsigned LCMSize = LCMTy.getSizeInBits();
470   const unsigned DstSize = DstTy.getSizeInBits();
471   const unsigned SrcSize = SrcTy.getSizeInBits();
472 
473   Register UnmergeSrc = SrcReg;
474   if (LCMSize != SrcSize) {
475     // Widen to the common type.
476     Register Undef = B.buildUndef(SrcTy).getReg(0);
477     SmallVector<Register, 8> MergeParts(1, SrcReg);
478     for (unsigned Size = SrcSize; Size != LCMSize; Size += SrcSize)
479       MergeParts.push_back(Undef);
480 
481     UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
482   }
483 
484   // Unmerge to the original registers and pad with dead defs.
485   SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
486   for (unsigned Size = DstSize * DstRegs.size(); Size != LCMSize;
487        Size += DstSize) {
488     UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
489   }
490 
491   B.buildUnmerge(UnmergeResults, UnmergeSrc);
492 }
493 
494 bool CallLowering::determineAndHandleAssignments(
495     ValueHandler &Handler, ValueAssigner &Assigner,
496     SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
497     CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
498   MachineFunction &MF = MIRBuilder.getMF();
499   const Function &F = MF.getFunction();
500   SmallVector<CCValAssign, 16> ArgLocs;
501 
502   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
503   if (!determineAssignments(Assigner, Args, CCInfo))
504     return false;
505 
506   return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
507                            ThisReturnReg);
508 }
509 
510 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
511   if (Flags.isSExt())
512     return TargetOpcode::G_SEXT;
513   if (Flags.isZExt())
514     return TargetOpcode::G_ZEXT;
515   return TargetOpcode::G_ANYEXT;
516 }
517 
518 bool CallLowering::determineAssignments(ValueAssigner &Assigner,
519                                         SmallVectorImpl<ArgInfo> &Args,
520                                         CCState &CCInfo) const {
521   LLVMContext &Ctx = CCInfo.getContext();
522   const CallingConv::ID CallConv = CCInfo.getCallingConv();
523 
524   unsigned NumArgs = Args.size();
525   for (unsigned i = 0; i != NumArgs; ++i) {
526     EVT CurVT = EVT::getEVT(Args[i].Ty);
527 
528     MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
529 
530     // If we need to split the type over multiple regs, check it's a scenario
531     // we currently support.
532     unsigned NumParts =
533         TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
534 
535     if (NumParts == 1) {
536       // Try to use the register type if we couldn't assign the VT.
537       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
538                              Args[i].Flags[0], CCInfo))
539         return false;
540       continue;
541     }
542 
543     // For incoming arguments (physregs to vregs), we could have values in
544     // physregs (or memlocs) which we want to extract and copy to vregs.
545     // During this, we might have to deal with the LLT being split across
546     // multiple regs, so we have to record this information for later.
547     //
548     // If we have outgoing args, then we have the opposite case. We have a
549     // vreg with an LLT which we want to assign to a physical location, and
550     // we might have to record that the value has to be split later.
551 
552     // We're handling an incoming arg which is split over multiple regs.
553     // E.g. passing an s128 on AArch64.
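    //
    // As an illustrative sketch, an s128 argument lowered as two s64 parts
    // ends up with per-part flags along the lines of:
    //   Part 0: OrigFlags + Split
    //   Part 1: OrigFlags + SplitEnd, with OrigAlign reset to 1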
554     ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
555     Args[i].Flags.clear();
556 
557     for (unsigned Part = 0; Part < NumParts; ++Part) {
558       ISD::ArgFlagsTy Flags = OrigFlags;
559       if (Part == 0) {
560         Flags.setSplit();
561       } else {
562         Flags.setOrigAlign(Align(1));
563         if (Part == NumParts - 1)
564           Flags.setSplitEnd();
565       }
566 
567       if (!Assigner.isIncomingArgumentHandler()) {
568         // TODO: Also check if there is a valid extension that preserves the
569         // bits. However currently this call lowering doesn't support non-exact
570         // split parts, so that can't be tested.
571         if (OrigFlags.isReturned() &&
572             (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
573           Flags.setReturned(false);
574         }
575       }
576 
577       Args[i].Flags.push_back(Flags);
578       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
579                              Args[i].Flags[Part], CCInfo)) {
580         // Still couldn't assign this smaller part type for some reason.
581         return false;
582       }
583     }
584   }
585 
586   return true;
587 }
588 
589 bool CallLowering::handleAssignments(ValueHandler &Handler,
590                                      SmallVectorImpl<ArgInfo> &Args,
591                                      CCState &CCInfo,
592                                      SmallVectorImpl<CCValAssign> &ArgLocs,
593                                      MachineIRBuilder &MIRBuilder,
594                                      Register ThisReturnReg) const {
595   MachineFunction &MF = MIRBuilder.getMF();
596   MachineRegisterInfo &MRI = MF.getRegInfo();
597   const Function &F = MF.getFunction();
598   const DataLayout &DL = F.getParent()->getDataLayout();
599 
600   const unsigned NumArgs = Args.size();
601 
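  // Args is indexed by i and ArgLocs by j; a split argument consumes several
  // ArgLocs, so j can advance faster than i. For example (illustrative), an
  // s128 argument assigned to two 64-bit locations advances j by two while i
  // advances by one.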
602   for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
603     assert(j < ArgLocs.size() && "Skipped too many arg locs");
604     CCValAssign &VA = ArgLocs[j];
605     assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
606 
607     if (VA.needsCustom()) {
608       unsigned NumArgRegs =
609           Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
610       if (!NumArgRegs)
611         return false;
612       j += NumArgRegs;
613       continue;
614     }
615 
616     const MVT ValVT = VA.getValVT();
617     const MVT LocVT = VA.getLocVT();
618 
619     const LLT LocTy(LocVT);
620     const LLT ValTy(ValVT);
621     const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
622     const EVT OrigVT = EVT::getEVT(Args[i].Ty);
623     const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
624 
    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv.
628     const unsigned NumParts = Args[i].Flags.size();
629 
630     // Now split the registers into the assigned types.
631     Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
632 
633     if (NumParts != 1 || NewLLT != OrigTy) {
634       // If we can't directly assign the register, we need one or more
635       // intermediate values.
636       Args[i].Regs.resize(NumParts);
637 
638       // For each split register, create and assign a vreg that will store
639       // the incoming component of the larger value. These will later be
640       // merged to form the final vreg.
641       for (unsigned Part = 0; Part < NumParts; ++Part)
642         Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
643     }
644 
645     assert((j + (NumParts - 1)) < ArgLocs.size() &&
646            "Too many regs for number of args");
647 
648     // Coerce into outgoing value types before register assignment.
649     if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
650       assert(Args[i].OrigRegs.size() == 1);
651       buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
652                       ValTy, extendOpFromFlags(Args[i].Flags[0]));
653     }
654 
655     for (unsigned Part = 0; Part < NumParts; ++Part) {
656       Register ArgReg = Args[i].Regs[Part];
657       // There should be Regs.size() ArgLocs per argument.
658       VA = ArgLocs[j + Part];
659       const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
660 
661       if (VA.isMemLoc() && !Flags.isByVal()) {
662         // Individual pieces may have been spilled to the stack and others
663         // passed in registers.
664 
665         // TODO: The memory size may be larger than the value we need to
666         // store. We may need to adjust the offset for big endian targets.
667         uint64_t MemSize = Handler.getStackValueStoreSize(DL, VA);
668 
669         MachinePointerInfo MPO;
670         Register StackAddr =
671             Handler.getStackAddress(MemSize, VA.getLocMemOffset(), MPO, Flags);
672 
673         Handler.assignValueToAddress(Args[i], Part, StackAddr, MemSize, MPO,
674                                      VA);
675         continue;
676       }
677 
678       if (VA.isMemLoc() && Flags.isByVal()) {
679         assert(Args[i].Regs.size() == 1 &&
680                "didn't expect split byval pointer");
681 
682         if (Handler.isIncomingArgumentHandler()) {
683           // We just need to copy the frame index value to the pointer.
684           MachinePointerInfo MPO;
685           Register StackAddr = Handler.getStackAddress(
686               Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
687           MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
688         } else {
689           // For outgoing byval arguments, insert the implicit copy byval
690           // implies, such that writes in the callee do not modify the caller's
691           // value.
692           uint64_t MemSize = Flags.getByValSize();
693           int64_t Offset = VA.getLocMemOffset();
694 
695           MachinePointerInfo DstMPO;
696           Register StackAddr =
697               Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
698 
699           MachinePointerInfo SrcMPO(Args[i].OrigValue);
700           if (!Args[i].OrigValue) {
701             // We still need to accurately track the stack address space if we
702             // don't know the underlying value.
703             const LLT PtrTy = MRI.getType(StackAddr);
704             SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
705           }
706 
707           Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
708                                     inferAlignFromPtrInfo(MF, DstMPO));
709 
710           Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
711                                     inferAlignFromPtrInfo(MF, SrcMPO));
712 
713           Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
714                                      DstMPO, DstAlign, SrcMPO, SrcAlign,
715                                      MemSize, VA);
716         }
717         continue;
718       }
719 
      assert(!VA.needsCustom() &&
             "custom loc should have been handled already");
721 
722       if (i == 0 && ThisReturnReg.isValid() &&
723           Handler.isIncomingArgumentHandler() &&
724           isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnReg, VA);
726         continue;
727       }
728 
729       Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
730     }
731 
732     // Now that all pieces have been assigned, re-pack the register typed values
733     // into the original value typed registers.
734     if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
735       // Merge the split registers into the expected larger result vregs of
736       // the original call.
737       buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
738                         LocTy, Args[i].Flags[0]);
739     }
740 
741     j += NumParts - 1;
742   }
743 
744   return true;
745 }
746 
747 void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
748                                    ArrayRef<Register> VRegs, Register DemoteReg,
749                                    int FI) const {
750   MachineFunction &MF = MIRBuilder.getMF();
751   MachineRegisterInfo &MRI = MF.getRegInfo();
752   const DataLayout &DL = MF.getDataLayout();
753 
754   SmallVector<EVT, 4> SplitVTs;
755   SmallVector<uint64_t, 4> Offsets;
756   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
757 
758   assert(VRegs.size() == SplitVTs.size());
759 
760   unsigned NumValues = SplitVTs.size();
761   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
762   Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
763   LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
764 
765   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
766 
767   for (unsigned I = 0; I < NumValues; ++I) {
768     Register Addr;
769     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
770     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
771                                         MRI.getType(VRegs[I]).getSizeInBytes(),
772                                         commonAlignment(BaseAlign, Offsets[I]));
773     MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
774   }
775 }
776 
777 void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
778                                     ArrayRef<Register> VRegs,
779                                     Register DemoteReg) const {
780   MachineFunction &MF = MIRBuilder.getMF();
781   MachineRegisterInfo &MRI = MF.getRegInfo();
782   const DataLayout &DL = MF.getDataLayout();
783 
784   SmallVector<EVT, 4> SplitVTs;
785   SmallVector<uint64_t, 4> Offsets;
786   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
787 
788   assert(VRegs.size() == SplitVTs.size());
789 
790   unsigned NumValues = SplitVTs.size();
791   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
792   unsigned AS = DL.getAllocaAddrSpace();
793   LLT OffsetLLTy =
794       getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
795 
796   MachinePointerInfo PtrInfo(AS);
797 
798   for (unsigned I = 0; I < NumValues; ++I) {
799     Register Addr;
800     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
801     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
802                                         MRI.getType(VRegs[I]).getSizeInBytes(),
803                                         commonAlignment(BaseAlign, Offsets[I]));
804     MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
805   }
806 }
807 
808 void CallLowering::insertSRetIncomingArgument(
809     const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
810     MachineRegisterInfo &MRI, const DataLayout &DL) const {
811   unsigned AS = DL.getAllocaAddrSpace();
812   DemoteReg = MRI.createGenericVirtualRegister(
813       LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
814 
815   Type *PtrTy = PointerType::get(F.getReturnType(), AS);
816 
817   SmallVector<EVT, 1> ValueVTs;
818   ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
819 
820   // NOTE: Assume that a pointer won't get split into more than one VT.
821   assert(ValueVTs.size() == 1);
822 
823   ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()));
824   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
825   DemoteArg.Flags[0].setSRet();
826   SplitArgs.insert(SplitArgs.begin(), DemoteArg);
827 }
828 
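/// As an illustrative sketch (type and value names assumed), demoting the
/// return value of a call conceptually rewrites
///
///   %r = call %struct.T @fn()
///
/// into
///
///   %slot = alloca %struct.T
///   call void @fn(%struct.T* sret(%struct.T) %slot)
///   %r = load %struct.T, %struct.T* %slot
///
/// except that it happens at the MIR level, with a frame index standing in
/// for the alloca; the corresponding loads are inserted by insertSRetLoads.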
829 void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
830                                               const CallBase &CB,
831                                               CallLoweringInfo &Info) const {
832   const DataLayout &DL = MIRBuilder.getDataLayout();
833   Type *RetTy = CB.getType();
834   unsigned AS = DL.getAllocaAddrSpace();
835   LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
836 
837   int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
838       DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
839 
840   Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
841   ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS));
842   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
843   DemoteArg.Flags[0].setSRet();
844 
845   Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
846   Info.DemoteStackIndex = FI;
847   Info.DemoteRegister = DemoteReg;
848 }
849 
850 bool CallLowering::checkReturn(CCState &CCInfo,
851                                SmallVectorImpl<BaseArgInfo> &Outs,
852                                CCAssignFn *Fn) const {
853   for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
854     MVT VT = MVT::getVT(Outs[I].Ty);
855     if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
856       return false;
857   }
858   return true;
859 }
860 
861 void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
862                                  AttributeList Attrs,
863                                  SmallVectorImpl<BaseArgInfo> &Outs,
864                                  const DataLayout &DL) const {
865   LLVMContext &Context = RetTy->getContext();
866   ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
867 
868   SmallVector<EVT, 4> SplitVTs;
869   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
870   addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
871 
872   for (EVT VT : SplitVTs) {
873     unsigned NumParts =
874         TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
875     MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
876     Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
877 
    for (unsigned I = 0; I < NumParts; ++I)
      Outs.emplace_back(PartTy, Flags);
881   }
882 }
883 
884 bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
885   const auto &F = MF.getFunction();
886   Type *ReturnType = F.getReturnType();
887   CallingConv::ID CallConv = F.getCallingConv();
888 
889   SmallVector<BaseArgInfo, 4> SplitArgs;
890   getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
891                 MF.getDataLayout());
892   return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
893 }
894 
895 bool CallLowering::parametersInCSRMatch(
896     const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
897     const SmallVectorImpl<CCValAssign> &OutLocs,
898     const SmallVectorImpl<ArgInfo> &OutArgs) const {
899   for (unsigned i = 0; i < OutLocs.size(); ++i) {
900     auto &ArgLoc = OutLocs[i];
901     // If it's not a register, it's fine.
902     if (!ArgLoc.isRegLoc())
903       continue;
904 
905     MCRegister PhysReg = ArgLoc.getLocReg();
906 
907     // Only look at callee-saved registers.
908     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
909       continue;
910 
911     LLVM_DEBUG(
912         dbgs()
913         << "... Call has an argument passed in a callee-saved register.\n");
914 
915     // Check if it was copied from.
916     const ArgInfo &OutInfo = OutArgs[i];
917 
918     if (OutInfo.Regs.size() > 1) {
919       LLVM_DEBUG(
920           dbgs() << "... Cannot handle arguments in multiple registers.\n");
921       return false;
922     }
923 
924     // Check if we copy the register, walking through copies from virtual
925     // registers. Note that getDefIgnoringCopies does not ignore copies from
926     // physical registers.
927     MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
928     if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
929       LLVM_DEBUG(
930           dbgs()
931           << "... Parameter was not copied into a VReg, cannot tail call.\n");
932       return false;
933     }
934 
935     // Got a copy. Verify that it's the same as the register we want.
936     Register CopyRHS = RegDef->getOperand(1).getReg();
937     if (CopyRHS != PhysReg) {
938       LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
939                            "VReg, cannot tail call.\n");
940       return false;
941     }
942   }
943 
944   return true;
945 }
946 
947 bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
948                                      MachineFunction &MF,
949                                      SmallVectorImpl<ArgInfo> &InArgs,
950                                      ValueAssigner &CalleeAssigner,
951                                      ValueAssigner &CallerAssigner) const {
952   const Function &F = MF.getFunction();
953   CallingConv::ID CalleeCC = Info.CallConv;
954   CallingConv::ID CallerCC = F.getCallingConv();
955 
956   if (CallerCC == CalleeCC)
957     return true;
958 
959   SmallVector<CCValAssign, 16> ArgLocs1;
960   CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
961   if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
962     return false;
963 
964   SmallVector<CCValAssign, 16> ArgLocs2;
965   CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
966   if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
967     return false;
968 
969   // We need the argument locations to match up exactly. If there's more in
970   // one than the other, then we are done.
971   if (ArgLocs1.size() != ArgLocs2.size())
972     return false;
973 
974   // Make sure that each location is passed in exactly the same way.
975   for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
976     const CCValAssign &Loc1 = ArgLocs1[i];
977     const CCValAssign &Loc2 = ArgLocs2[i];
978 
979     // We need both of them to be the same. So if one is a register and one
980     // isn't, we're done.
981     if (Loc1.isRegLoc() != Loc2.isRegLoc())
982       return false;
983 
984     if (Loc1.isRegLoc()) {
985       // If they don't have the same register location, we're done.
986       if (Loc1.getLocReg() != Loc2.getLocReg())
987         return false;
988 
989       // They matched, so we can move to the next ArgLoc.
990       continue;
991     }
992 
993     // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
994     if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
995       return false;
996   }
997 
998   return true;
999 }
1000 
1001 uint64_t CallLowering::ValueHandler::getStackValueStoreSize(
1002     const DataLayout &DL, const CCValAssign &VA) const {
1003   const EVT ValVT = VA.getValVT();
1004   if (ValVT != MVT::iPTR)
1005     return ValVT.getStoreSize();
1006 
  // FIXME: We need to get the correct pointer address space.
1008   return DL.getPointerSize();
1009 }
1010 
1011 void CallLowering::ValueHandler::copyArgumentMemory(
1012     const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1013     const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1014     const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1015     CCValAssign &VA) const {
1016   MachineFunction &MF = MIRBuilder.getMF();
1017   MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1018       SrcPtrInfo,
1019       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1020       SrcAlign);
1021 
1022   MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1023       DstPtrInfo,
1024       MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1025       MemSize, DstAlign);
1026 
1027   const LLT PtrTy = MRI.getType(DstPtr);
1028   const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1029 
1030   auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1031   MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1032 }
1033 
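/// Extend \p ValReg so it matches the location type recorded in \p VA. As an
/// illustrative sketch (register names assumed), an s8 value assigned to a
/// 32-bit location with CCValAssign::SExt becomes:
///
///   %loc:_(s32) = G_SEXT %val(s8)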
1034 Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1035                                                     CCValAssign &VA,
1036                                                     unsigned MaxSizeBits) {
1037   LLT LocTy{VA.getLocVT()};
1038   LLT ValTy{VA.getValVT()};
1039 
1040   if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1041     return ValReg;
1042 
1043   if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1044     if (MaxSizeBits <= ValTy.getSizeInBits())
1045       return ValReg;
1046     LocTy = LLT::scalar(MaxSizeBits);
1047   }
1048 
1049   switch (VA.getLocInfo()) {
1050   default: break;
1051   case CCValAssign::Full:
1052   case CCValAssign::BCvt:
1053     // FIXME: bitconverting between vector types may or may not be a
1054     // nop in big-endian situations.
1055     return ValReg;
1056   case CCValAssign::AExt: {
1057     auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1058     return MIB.getReg(0);
1059   }
1060   case CCValAssign::SExt: {
1061     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1062     MIRBuilder.buildSExt(NewReg, ValReg);
1063     return NewReg;
1064   }
1065   case CCValAssign::ZExt: {
1066     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1067     MIRBuilder.buildZExt(NewReg, ValReg);
1068     return NewReg;
1069   }
1070   }
1071   llvm_unreachable("unable to extend register");
1072 }
1073 
1074 void CallLowering::ValueAssigner::anchor() {}
1075 
1076 Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
1077                                                                 Register SrcReg,
1078                                                                 LLT NarrowTy) {
1079   switch (VA.getLocInfo()) {
1080   case CCValAssign::LocInfo::ZExt: {
1081     return MIRBuilder
1082         .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1083                          NarrowTy.getScalarSizeInBits())
1084         .getReg(0);
1085   }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
1093   default:
1094     return SrcReg;
1095   }
1096 }
1097 
1098 /// Check if we can use a basic COPY instruction between the two types.
1099 ///
/// We're currently building on top of the infrastructure using MVT, which
/// loses pointer information in the CCValAssign. We accept copies from
/// physical registers that have been reported as integers if the destination
/// is an equivalently sized pointer LLT.
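///
/// As an illustrative sketch (register names assumed), an incoming pointer
/// argument whose location was reported as a 64-bit integer can still be
/// copied directly into a p0 vreg on a target with 64-bit pointers:
///
///   %ptr:_(p0) = COPY $x0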
1104 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1105   if (SrcTy == DstTy)
1106     return true;
1107 
1108   if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1109     return false;
1110 
1111   SrcTy = SrcTy.getScalarType();
1112   DstTy = DstTy.getScalarType();
1113 
  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
1116 }
1117 
1118 void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
1119                                                           Register PhysReg,
1120                                                           CCValAssign &VA) {
1121   const MVT LocVT = VA.getLocVT();
1122   const LLT LocTy(LocVT);
1123   const LLT RegTy = MRI.getType(ValVReg);
1124 
1125   if (isCopyCompatibleType(RegTy, LocTy)) {
1126     MIRBuilder.buildCopy(ValVReg, PhysReg);
1127     return;
1128   }
1129 
1130   auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1131   auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1132   MIRBuilder.buildTrunc(ValVReg, Hint);
1133 }
1134