//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

/// Helper function which updates \p Flags when \p AttrFn returns true.
static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
                    const std::function<bool(Attribute::AttrKind)> &AttrFn) {
  if (AttrFn(Attribute::SExt))
    Flags.setSExt();
  if (AttrFn(Attribute::ZExt))
    Flags.setZExt();
  if (AttrFn(Attribute::InReg))
    Flags.setInReg();
  if (AttrFn(Attribute::StructRet))
    Flags.setSRet();
  if (AttrFn(Attribute::Nest))
    Flags.setNest();
  if (AttrFn(Attribute::ByVal))
    Flags.setByVal();
  if (AttrFn(Attribute::Preallocated))
    Flags.setPreallocated();
  if (AttrFn(Attribute::InAlloca))
    Flags.setInAlloca();
  if (AttrFn(Attribute::Returned))
    Flags.setReturned();
  if (AttrFn(Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (AttrFn(Attribute::SwiftError))
    Flags.setSwiftError();
}

ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
    return Call.paramHasAttr(ArgIdx, Attr);
  });
  return Flags;
}

void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                             const AttributeList &Attrs,
                                             unsigned OpIdx) const {
  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
    return Attrs.hasAttribute(OpIdx, Attr);
  });
}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
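  // Arguments past the function type's parameter count form the variadic
  // tail; flag them as non-fixed so targets can apply the vararg convention
  // to them.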
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);

    // If we have an explicit sret argument that is an Instruction (i.e. it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);

  Align MemAlign;
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, the alignment should be passed in from the frontend. The
    // backend will guess if it's missing, but there are cases it cannot get
    // right.
    if (auto ParamAlign = FuncInfo.getParamStackAlign(OpIdx - 1))
      MemAlign = *ParamAlign;
    else if ((ParamAlign = FuncInfo.getParamAlign(OpIdx - 1)))
      MemAlign = *ParamAlign;
    else
      MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
  } else if (auto ParamAlign = FuncInfo.getParamStackAlign(OpIdx - 1)) {
    MemAlign = *ParamAlign;
  } else {
    MemAlign = DL.getABITypeAlign(Arg.Ty);
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv) const {
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
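  // ComputeValueVTs flattens aggregates into their leaf value types, e.g. a
  // {i64, [2 x float]} argument becomes i64, float, float at byte offsets 0,
  // 8 and 12.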

  if (SplitVTs.empty())
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags[0], OrigArg.IsFixed,
                           OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                           OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
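  // Note that computeValueLLTs reports the offsets in bits, which is what
  // G_EXTRACT expects below.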
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy != PartLLT) {
    // e.g. A <3 x s16> value was split to <2 x s16>
    // %reg_value0:_(<2 x s16>)
    // %reg_value1:_(<2 x s16>)
    // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
    // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
    // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
    const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
    Register Undef = B.buildUndef(PartLLT).getReg(0);

    // Build vector of undefs.
    SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

    // Replace the first sources with the real registers.
    std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
    UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}

/// Create a sequence of instructions to combine pieces split into register
/// typed values to the original IR value. \p OrigRegs contains the destination
/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT) {
  MachineRegisterInfo &MRI = *B.getMRI();

  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(LLTy != PartLLT && "identical part types shouldn't reach here");

  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits()) {
    assert(OrigRegs.size() == 1 && Regs.size() == 1);
    B.buildTrunc(OrigRegs[0], Regs[0]);
    return;
  }

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

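    // The combined parts may carry more bits than the original value, e.g. an
    // s48 passed as two s32 parts; merge to the wide scalar first, then
    // truncate back to the original type.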
    unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1 &&
           LLTy.getScalarType() == PartLLT.getElementType());
    mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
/// contain the type of scalar value extension if necessary.
///
/// This is used for outgoing values (vregs to physregs)
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            Register SrcReg, LLT SrcTy, LLT PartTy,
                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const unsigned PartSize = PartTy.getSizeInBits();

  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT LCMTy = getLCMType(SrcTy, PartTy);

  const unsigned LCMSize = LCMTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

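  // e.g. For SrcTy = s96 and PartTy = s64, LCMTy is s192: pad the source to
  // s192 with an undef s96, then unmerge into three s64 parts, the last of
  // which is a dead def.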
  Register UnmergeSrc = SrcReg;
  if (LCMSize != SrcSize) {
    // Widen to the common type.
    Register Undef = B.buildUndef(SrcTy).getReg(0);
    SmallVector<Register, 8> MergeParts(1, SrcReg);
    for (unsigned Size = SrcSize; Size != LCMSize; Size += SrcSize)
      MergeParts.push_back(Undef);

    UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
  }

  // Unmerge to the original registers and pad with dead defs.
  SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
  for (unsigned Size = DstSize * DstRegs.size(); Size != LCMSize;
       Size += DstSize) {
    UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
  }

  B.buildUnmerge(UnmergeResults, UnmergeSrc);
}

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler,
                                     CallingConv::ID CallConv, bool IsVarArg,
                                     Register ThisReturnReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler,
                           ThisReturnReg);
}

static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
  if (Flags.isSExt())
    return TargetOpcode::G_SEXT;
  if (Flags.isZExt())
    return TargetOpcode::G_ZEXT;
  return TargetOpcode::G_ANYEXT;
}

bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler,
                                     Register ThisReturnReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (CurVT.isSimple() &&
        !Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                           CCValAssign::Full, Args[i], Args[i].Flags[0],
                           CCInfo))
      continue;

    MVT NewVT = TLI->getRegisterTypeForCallingConv(
        F.getContext(), CCInfo.getCallingConv(), EVT(CurVT));

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts = TLI->getNumRegistersForCallingConv(
        F.getContext(), CCInfo.getCallingConv(), CurVT);
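    // e.g. On AArch64, an i128 argument has NewVT == i64 and NumParts == 2:
    // each half is assigned to its own GPR.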

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                            Args[i].Flags[0], CCInfo))
        return false;

      // If we couldn't directly assign this part, some casting may be
      // necessary. Create the new register, but defer inserting the conversion
      // instructions.
      assert(Args[i].OrigRegs.empty());
      Args[i].OrigRegs.push_back(Args[i].Regs[0]);
      assert(Args[i].Regs.size() == 1);

      const LLT VATy(NewVT);
      Args[i].Regs[0] = MRI.createGenericVirtualRegister(VATy);
      continue;
    }

    const LLT NewLLT(NewVT);

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.
    if (Handler.isIncomingArgumentHandler()) {
      // We're handling an incoming arg which is split over multiple regs.
      // E.g. passing an s128 on AArch64.
      ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
      Args[i].OrigRegs.push_back(Args[i].Regs[0]);
      Args[i].Regs.clear();
      Args[i].Flags.clear();
      // For each split register, create and assign a vreg that will store
      // the incoming component of the larger value. These will later be
      // merged to form the final vreg.
      for (unsigned Part = 0; Part < NumParts; ++Part) {
        Register Reg = MRI.createGenericVirtualRegister(NewLLT);
        ISD::ArgFlagsTy Flags = OrigFlags;
        if (Part == 0) {
          Flags.setSplit();
        } else {
          Flags.setOrigAlign(Align(1));
          if (Part == NumParts - 1)
            Flags.setSplitEnd();
        }
        Args[i].Regs.push_back(Reg);
        Args[i].Flags.push_back(Flags);
        if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                              Args[i].Flags[Part], CCInfo)) {
          // Still couldn't assign this smaller part type for some reason.
          return false;
        }
      }
    } else {
      assert(Args[i].Regs.size() == 1);

      // This type is passed via multiple registers in the calling convention.
      // We need to extract the individual parts.
      assert(Args[i].OrigRegs.empty());
      Args[i].OrigRegs.push_back(Args[i].Regs[0]);

      ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
      // We're going to replace the regs and flags with the split ones.
      Args[i].Regs.clear();
      Args[i].Flags.clear();
      for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
        ISD::ArgFlagsTy Flags = OrigFlags;
        if (PartIdx == 0) {
          Flags.setSplit();
        } else {
          Flags.setOrigAlign(Align(1));
          if (PartIdx == NumParts - 1)
            Flags.setSplitEnd();
        }

        // TODO: Also check if there is a valid extension that preserves the
        // bits. However currently this call lowering doesn't support non-exact
        // split parts, so that can't be tested.
        if (OrigFlags.isReturned() &&
            (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
          Flags.setReturned(false);
        }

        Register NewReg = MRI.createGenericVirtualRegister(NewLLT);

        Args[i].Regs.push_back(NewReg);
        Args[i].Flags.push_back(Flags);
        if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full,
                              Args[i], Args[i].Flags[PartIdx], CCInfo))
          return false;
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
    const LLT VATy(VAVT.getSimpleVT());

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    unsigned NumArgRegs = Args[i].Regs.size();
    assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != VATy) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      VATy, extendOpFromFlags(Args[i].Flags[0]));
    }

    for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      VA = ArgLocs[j + Part];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // FIXME: Use correct address space for pointer size
        EVT LocVT = VA.getValVT();
        unsigned MemSize = LocVT == MVT::iPTR ? DL.getPointerSize()
                                              : LocVT.getStoreSize();
        unsigned Offset = VA.getLocMemOffset();
        MachinePointerInfo MPO;
        Register StackAddr =
            Handler.getStackAddress(MemSize, Offset, MPO, Flags);
        Handler.assignValueToAddress(Args[i], Part, StackAddr, MemSize, MPO,
                                     VA);
        continue;
      }

      if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 &&
               "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
        continue;
      }

      assert(!VA.needsCustom() && "custom loc should have been handled already");

      if (i == 0 && ThisReturnReg.isValid() &&
          Handler.isIncomingArgumentHandler() &&
          isTypeIsValidForThisReturn(VAVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnReg, VA);
        continue;
      }

      Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers.
    if (Handler.isIncomingArgumentHandler() && OrigTy != VATy) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        VATy);
    }

    j += NumArgRegs - 1;
  }

  return true;
}

void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                   ArrayRef<Register> VRegs, Register DemoteReg,
                                   int FI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                        MRI.getType(VRegs[I]).getSizeInBytes(),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                    ArrayRef<Register> VRegs,
                                    Register DemoteReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  unsigned AS = DL.getAllocaAddrSpace();
  LLT OffsetLLTy =
      getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);

  MachinePointerInfo PtrInfo(AS);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                        MRI.getType(VRegs[I]).getSizeInBytes(),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetIncomingArgument(
    const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
    MachineRegisterInfo &MRI, const DataLayout &DL) const {
  unsigned AS = DL.getAllocaAddrSpace();
  DemoteReg = MRI.createGenericVirtualRegister(
      LLT::pointer(AS, DL.getPointerSizeInBits(AS)));

  Type *PtrTy = PointerType::get(F.getReturnType(), AS);

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);

  // NOTE: Assume that a pointer won't get split into more than one VT.
  assert(ValueVTs.size() == 1);

  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()));
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
  DemoteArg.Flags[0].setSRet();
  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
}

void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                              const CallBase &CB,
                                              CallLoweringInfo &Info) const {
  const DataLayout &DL = MIRBuilder.getDataLayout();
  Type *RetTy = CB.getType();
  unsigned AS = DL.getAllocaAddrSpace();
  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));

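  // Allocate a stack slot in the caller for the callee to store the demoted
  // return value to; its address becomes the hidden sret argument.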
  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
      DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);

  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS));
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
  DemoteArg.Flags[0].setSRet();

  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
  Info.DemoteStackIndex = FI;
  Info.DemoteRegister = DemoteReg;
}

bool CallLowering::checkReturn(CCState &CCInfo,
                               SmallVectorImpl<BaseArgInfo> &Outs,
                               CCAssignFn *Fn) const {
  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
      return false;
  }
  return true;
}

void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
                                 AttributeList Attrs,
                                 SmallVectorImpl<BaseArgInfo> &Outs,
                                 const DataLayout &DL) const {
  LLVMContext &Context = RetTy->getContext();
  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);

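  // Emit one BaseArgInfo per legalized register-sized piece, e.g. an i128
  // return on a 64-bit target yields two i64 entries.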
  for (EVT VT : SplitVTs) {
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
    MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
    Type *PartTy = EVT(RegVT).getTypeForEVT(Context);

    for (unsigned I = 0; I < NumParts; ++I) {
      Outs.emplace_back(PartTy, Flags);
    }
  }
}

bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
  const auto &F = MF.getFunction();
  Type *ReturnType = F.getReturnType();
  CallingConv::ID CallConv = F.getCallingConv();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                MF.getDataLayout());
  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}

bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}

bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

void CallLowering::ValueHandler::copyArgumentMemory(
    const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
    const MachinePointerInfo &DstPtrInfo, Align DstAlign,
    const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
    CCValAssign &VA) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
      SrcAlign);

  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo,
      MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
      MemSize, DstAlign);

  const LLT PtrTy = MRI.getType(DstPtr);
  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());

  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}

Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

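  // If the assigner capped the extension width (MaxSizeBits), don't extend
  // past that cap; e.g. a value passed on the stack may only need extending
  // to its memory size rather than the full location type.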
  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}

Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
                                                                Register SrcReg,
                                                                LLT NarrowTy) {
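  // The ABI required the caller to extend the value, so the high bits of
  // SrcReg are known; record that fact with G_ASSERT_ZEXT / G_ASSERT_SEXT so
  // later passes can take advantage of it.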
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::ZExt: {
    return MIRBuilder
        .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  default:
    return SrcReg;
  }
}

void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
                                                          Register PhysReg,
                                                          CCValAssign &VA) {
  const LLT LocTy(VA.getLocVT());
  const LLT ValTy = MRI.getType(ValVReg);

  if (ValTy.getSizeInBits() == LocTy.getSizeInBits()) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    return;
  }

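  // The location is wider than the value: copy the whole location-sized
  // register, attach the extension hint, then truncate to the value type.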
  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
  auto Hint = buildExtensionHint(VA, Copy.getReg(0), ValTy);
  MIRBuilder.buildTrunc(ValVReg, Hint);
}