1 //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements some simple delegations needed for call lowering.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
16 #include "llvm/CodeGen/GlobalISel/Utils.h"
17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Target/TargetMachine.h"
26 
27 #define DEBUG_TYPE "call-lowering"
28 
29 using namespace llvm;
30 
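// This out-of-line virtual method definition anchors CallLowering's vtable to
// this translation unit.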
31 void CallLowering::anchor() {}
32 
33 /// Helper function which updates \p Flags when \p AttrFn returns true.
34 static void
35 addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
36                     const std::function<bool(Attribute::AttrKind)> &AttrFn) {
37   if (AttrFn(Attribute::SExt))
38     Flags.setSExt();
39   if (AttrFn(Attribute::ZExt))
40     Flags.setZExt();
41   if (AttrFn(Attribute::InReg))
42     Flags.setInReg();
43   if (AttrFn(Attribute::StructRet))
44     Flags.setSRet();
45   if (AttrFn(Attribute::Nest))
46     Flags.setNest();
47   if (AttrFn(Attribute::ByVal))
48     Flags.setByVal();
49   if (AttrFn(Attribute::Preallocated))
50     Flags.setPreallocated();
51   if (AttrFn(Attribute::InAlloca))
52     Flags.setInAlloca();
53   if (AttrFn(Attribute::Returned))
54     Flags.setReturned();
55   if (AttrFn(Attribute::SwiftSelf))
56     Flags.setSwiftSelf();
57   if (AttrFn(Attribute::SwiftAsync))
58     Flags.setSwiftAsync();
59   if (AttrFn(Attribute::SwiftError))
60     Flags.setSwiftError();
61 }
62 
63 ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
64                                                      unsigned ArgIdx) const {
65   ISD::ArgFlagsTy Flags;
66   addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
67     return Call.paramHasAttr(ArgIdx, Attr);
68   });
69   return Flags;
70 }
71 
72 void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
73                                              const AttributeList &Attrs,
74                                              unsigned OpIdx) const {
75   addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
76     return Attrs.hasAttribute(OpIdx, Attr);
77   });
78 }
79 
80 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
81                              ArrayRef<Register> ResRegs,
82                              ArrayRef<ArrayRef<Register>> ArgRegs,
83                              Register SwiftErrorVReg,
84                              std::function<unsigned()> GetCalleeReg) const {
85   CallLoweringInfo Info;
86   const DataLayout &DL = MIRBuilder.getDataLayout();
87   MachineFunction &MF = MIRBuilder.getMF();
88   bool CanBeTailCalled = CB.isTailCall() &&
89                          isInTailCallPosition(CB, MF.getTarget()) &&
90                          (MF.getFunction()
91                               .getFnAttribute("disable-tail-calls")
92                               .getValueAsString() != "true");
93 
94   CallingConv::ID CallConv = CB.getCallingConv();
95   Type *RetTy = CB.getType();
96   bool IsVarArg = CB.getFunctionType()->isVarArg();
97 
98   SmallVector<BaseArgInfo, 4> SplitArgs;
99   getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
100   Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);
101 
102   if (!Info.CanLowerReturn) {
103     // Callee requires sret demotion.
104     insertSRetOutgoingArgument(MIRBuilder, CB, Info);
105 
106     // The sret demotion isn't compatible with tail-calls, since the sret
107     // argument points into the caller's stack frame.
108     CanBeTailCalled = false;
109   }
110 
  // First step is to marshal all the function's parameters into the correct
112   // physregs and memory locations. Gather the sequence of argument types that
113   // we'll pass to the assigner function.
114   unsigned i = 0;
115   unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
116   for (auto &Arg : CB.args()) {
117     ArgInfo OrigArg{ArgRegs[i], *Arg.get(), getAttributesForArgIdx(CB, i),
118                     i < NumFixedArgs};
119     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
120 
    // If we have an explicit sret argument that is an Instruction (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
123     if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
124       CanBeTailCalled = false;
125 
126     Info.OrigArgs.push_back(OrigArg);
127     ++i;
128   }
129 
130   // Try looking through a bitcast from one function type to another.
131   // Commonly happens with calls to objc_msgSend().
132   const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
133   if (const Function *F = dyn_cast<Function>(CalleeV))
134     Info.Callee = MachineOperand::CreateGA(F, 0);
135   else
136     Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
137 
138   Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}};
139   if (!Info.OrigRet.Ty->isVoidTy())
140     setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
141 
142   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
143   Info.CallConv = CallConv;
144   Info.SwiftErrorVReg = SwiftErrorVReg;
145   Info.IsMustTailCall = CB.isMustTailCall();
146   Info.IsTailCall = CanBeTailCalled;
147   Info.IsVarArg = IsVarArg;
148   return lowerCall(MIRBuilder, Info);
149 }
150 
151 template <typename FuncInfoTy>
152 void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
153                                const DataLayout &DL,
154                                const FuncInfoTy &FuncInfo) const {
155   auto &Flags = Arg.Flags[0];
156   const AttributeList &Attrs = FuncInfo.getAttributes();
157   addArgFlagsFromAttributes(Flags, Attrs, OpIdx);
158 
159   Align MemAlign = DL.getABITypeAlign(Arg.Ty);
160   if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
161     assert(OpIdx >= AttributeList::FirstArgIndex);
162     Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
163 
164     auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
165     Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));
166 
    // For ByVal, the alignment should be passed by the frontend. The backend
    // will guess if this info is missing, but there are cases it cannot get
    // right.
169     if (auto ParamAlign =
170             FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
171       MemAlign = *ParamAlign;
172     else if ((ParamAlign =
173                   FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex)))
174       MemAlign = *ParamAlign;
175     else
176       MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
177   } else if (OpIdx >= AttributeList::FirstArgIndex) {
178     if (auto ParamAlign =
179             FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
180       MemAlign = *ParamAlign;
181   }
182   Flags.setMemAlign(MemAlign);
183   Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
184 
  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in the return register (e.g. x0 on
  // AArch64).
187   if (Flags.isSwiftSelf())
188     Flags.setReturned(false);
189 }
190 
191 template void
192 CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
193                                     const DataLayout &DL,
194                                     const Function &FuncInfo) const;
195 
196 template void
197 CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
198                                     const DataLayout &DL,
199                                     const CallBase &FuncInfo) const;
200 
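// Split OrigArg into one ArgInfo per value type produced by ComputeValueVTs,
// so each piece can be assigned to a location independently.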
201 void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
202                                      SmallVectorImpl<ArgInfo> &SplitArgs,
203                                      const DataLayout &DL,
204                                      CallingConv::ID CallConv) const {
205   LLVMContext &Ctx = OrigArg.Ty->getContext();
206 
207   SmallVector<EVT, 4> SplitVTs;
208   SmallVector<uint64_t, 4> Offsets;
209   ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
210 
  if (SplitVTs.empty())
212     return;
213 
214   if (SplitVTs.size() == 1) {
215     // No splitting to do, but we want to replace the original type (e.g. [1 x
216     // double] -> double).
217     SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
218                            OrigArg.Flags[0], OrigArg.IsFixed,
219                            OrigArg.OrigValue);
220     return;
221   }
222 
223   // Create one ArgInfo for each virtual register in the original ArgInfo.
224   assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
225 
226   bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
227       OrigArg.Ty, CallConv, false);
228   for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
229     Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
230     SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
231                            OrigArg.IsFixed);
232     if (NeedsRegBlock)
233       SplitArgs.back().Flags[0].setInConsecutiveRegs();
234   }
235 
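  // Mark the last piece as the end of the split; in practice targets only
  // consult this flag for arguments marked InConsecutiveRegs.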
236   SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
237 }
238 
239 void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
240                               Type *PackedTy,
241                               MachineIRBuilder &MIRBuilder) const {
242   assert(DstRegs.size() > 1 && "Nothing to unpack");
243 
244   const DataLayout &DL = MIRBuilder.getDataLayout();
245 
246   SmallVector<LLT, 8> LLTs;
247   SmallVector<uint64_t, 8> Offsets;
248   computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
249   assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");
250 
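  // G_EXTRACT each destination piece from the packed value at its offset
  // within the overall type.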
251   for (unsigned i = 0; i < DstRegs.size(); ++i)
252     MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
253 }
254 
255 /// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
256 static MachineInstrBuilder
257 mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
258                             ArrayRef<Register> SrcRegs) {
259   MachineRegisterInfo &MRI = *B.getMRI();
260   LLT LLTy = MRI.getType(DstRegs[0]);
261   LLT PartLLT = MRI.getType(SrcRegs[0]);
262 
263   // Deal with v3s16 split into v2s16
264   LLT LCMTy = getLCMType(LLTy, PartLLT);
265   if (LCMTy == LLTy) {
266     // Common case where no padding is needed.
267     assert(DstRegs.size() == 1);
268     return B.buildConcatVectors(DstRegs[0], SrcRegs);
269   }
270 
271   // We need to create an unmerge to the result registers, which may require
272   // widening the original value.
273   Register UnmergeSrcReg;
274   if (LCMTy != PartLLT) {
275     // e.g. A <3 x s16> value was split to <2 x s16>
276     // %register_value0:_(<2 x s16>)
277     // %register_value1:_(<2 x s16>)
278     // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
    // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
280     // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
281     const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
282     Register Undef = B.buildUndef(PartLLT).getReg(0);
283 
284     // Build vector of undefs.
285     SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);
286 
287     // Replace the first sources with the real registers.
288     std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
289     UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
290   } else {
291     // We don't need to widen anything if we're extracting a scalar which was
292     // promoted to a vector e.g. s8 -> v4s8 -> s8
293     assert(SrcRegs.size() == 1);
294     UnmergeSrcReg = SrcRegs[0];
295   }
296 
297   int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();
298 
299   SmallVector<Register, 8> PadDstRegs(NumDst);
300   std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());
301 
302   // Create the excess dead defs for the unmerge.
303   for (int I = DstRegs.size(); I != NumDst; ++I)
304     PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);
305 
306   return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
307 }
308 
309 /// Create a sequence of instructions to combine pieces split into register
310 /// typed values to the original IR value. \p OrigRegs contains the destination
311 /// value registers of type \p LLTy, and \p Regs contains the legalized pieces
312 /// with type \p PartLLT. This is used for incoming values (physregs to vregs).
313 static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
314                               ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
315                               const ISD::ArgFlagsTy Flags) {
316   MachineRegisterInfo &MRI = *B.getMRI();
317 
318   if (PartLLT == LLTy) {
319     // We should have avoided introducing a new virtual register, and just
320     // directly assigned here.
321     assert(OrigRegs[0] == Regs[0]);
322     return;
323   }
324 
325   if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
326       Regs.size() == 1) {
327     B.buildBitcast(OrigRegs[0], Regs[0]);
328     return;
329   }
330 
331   if (PartLLT.isVector() == LLTy.isVector() &&
332       PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
333       OrigRegs.size() == 1 && Regs.size() == 1) {
334     Register SrcReg = Regs[0];
335 
336     LLT LocTy = MRI.getType(SrcReg);
337 
338     if (Flags.isSExt()) {
339       SrcReg = B.buildAssertSExt(LocTy, SrcReg,
340                                  LLTy.getScalarSizeInBits()).getReg(0);
341     } else if (Flags.isZExt()) {
342       SrcReg = B.buildAssertZExt(LocTy, SrcReg,
343                                  LLTy.getScalarSizeInBits()).getReg(0);
344     }
345 
346     B.buildTrunc(OrigRegs[0], SrcReg);
347     return;
348   }
349 
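  // Scalar pieces for a scalar result: merge them, truncating if the parts
  // cover more bits than the original value.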
350   if (!LLTy.isVector() && !PartLLT.isVector()) {
351     assert(OrigRegs.size() == 1);
352     LLT OrigTy = MRI.getType(OrigRegs[0]);
353 
354     unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
355     if (SrcSize == OrigTy.getSizeInBits())
356       B.buildMerge(OrigRegs[0], Regs);
357     else {
358       auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
359       B.buildTrunc(OrigRegs[0], Widened);
360     }
361 
362     return;
363   }
364 
365   if (PartLLT.isVector()) {
366     assert(OrigRegs.size() == 1);
367 
368     if (LLTy.getScalarType() == PartLLT.getElementType()) {
369       mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
370     } else {
371       SmallVector<Register> CastRegs(Regs.size());
372       unsigned I = 0;
373       LLT GCDTy = getGCDType(LLTy, PartLLT);
374 
375       // We are both splitting a vector, and bitcasting its element types. Cast
376       // the source pieces into the appropriate number of pieces with the result
377       // element type.
378       for (Register SrcReg : Regs)
379         CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
380       mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
381     }
382 
383     return;
384   }
385 
386   assert(LLTy.isVector() && !PartLLT.isVector());
387 
388   LLT DstEltTy = LLTy.getElementType();
389 
390   // Pointer information was discarded. We'll need to coerce some register types
391   // to avoid violating type constraints.
392   LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();
393 
394   assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());
395 
396   if (DstEltTy == PartLLT) {
397     // Vector was trivially scalarized.
398 
399     if (RealDstEltTy.isPointer()) {
400       for (Register Reg : Regs)
401         MRI.setType(Reg, RealDstEltTy);
402     }
403 
404     B.buildBuildVector(OrigRegs[0], Regs);
405   } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
406     // Deal with vector with 64-bit elements decomposed to 32-bit
407     // registers. Need to create intermediate 64-bit elements.
408     SmallVector<Register, 8> EltMerges;
409     int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();
410 
411     assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);
412 
413     for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
414       auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
415       // Fix the type in case this is really a vector of pointers.
416       MRI.setType(Merge.getReg(0), RealDstEltTy);
417       EltMerges.push_back(Merge.getReg(0));
418       Regs = Regs.drop_front(PartsPerElt);
419     }
420 
421     B.buildBuildVector(OrigRegs[0], EltMerges);
422   } else {
423     // Vector was split, and elements promoted to a wider type.
424     // FIXME: Should handle floating point promotions.
425     LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
426     auto BV = B.buildBuildVector(BVType, Regs);
427     B.buildTrunc(OrigRegs[0], BV);
428   }
429 }
430 
431 /// Create a sequence of instructions to expand the value in \p SrcReg (of type
432 /// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
433 /// contain the type of scalar value extension if necessary.
434 ///
435 /// This is used for outgoing values (vregs to physregs)
436 static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
437                             Register SrcReg, LLT SrcTy, LLT PartTy,
438                             unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
439   // We could just insert a regular copy, but this is unreachable at the moment.
440   assert(SrcTy != PartTy && "identical part types shouldn't reach here");
441 
442   const unsigned PartSize = PartTy.getSizeInBits();
443 
444   if (PartTy.isVector() == SrcTy.isVector() &&
445       PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
446     assert(DstRegs.size() == 1);
447     B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
448     return;
449   }
450 
451   if (SrcTy.isVector() && !PartTy.isVector() &&
452       PartSize > SrcTy.getElementType().getSizeInBits()) {
453     // Vector was scalarized, and the elements extended.
454     auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
455     for (int i = 0, e = DstRegs.size(); i != e; ++i)
456       B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
457     return;
458   }
459 
460   LLT GCDTy = getGCDType(SrcTy, PartTy);
461   if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
463     B.buildUnmerge(DstRegs, SrcReg);
464     return;
465   }
466 
467   MachineRegisterInfo &MRI = *B.getMRI();
468   LLT DstTy = MRI.getType(DstRegs[0]);
469   LLT LCMTy = getLCMType(SrcTy, PartTy);
470 
471   const unsigned LCMSize = LCMTy.getSizeInBits();
472   const unsigned DstSize = DstTy.getSizeInBits();
473   const unsigned SrcSize = SrcTy.getSizeInBits();
474 
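  // The part type doesn't evenly divide the source type. E.g. an s96 source
  // with s64 parts: merge with an s96 undef to form s192, then unmerge into
  // s64 pieces, the excess ones being dead defs.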
475   Register UnmergeSrc = SrcReg;
476   if (LCMSize != SrcSize) {
477     // Widen to the common type.
478     Register Undef = B.buildUndef(SrcTy).getReg(0);
479     SmallVector<Register, 8> MergeParts(1, SrcReg);
480     for (unsigned Size = SrcSize; Size != LCMSize; Size += SrcSize)
481       MergeParts.push_back(Undef);
482 
483     UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
484   }
485 
486   // Unmerge to the original registers and pad with dead defs.
487   SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
488   for (unsigned Size = DstSize * DstRegs.size(); Size != LCMSize;
489        Size += DstSize) {
490     UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
491   }
492 
493   B.buildUnmerge(UnmergeResults, UnmergeSrc);
494 }
495 
496 bool CallLowering::determineAndHandleAssignments(
497     ValueHandler &Handler, ValueAssigner &Assigner,
498     SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
499     CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
500   MachineFunction &MF = MIRBuilder.getMF();
501   const Function &F = MF.getFunction();
502   SmallVector<CCValAssign, 16> ArgLocs;
503 
504   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
505   if (!determineAssignments(Assigner, Args, CCInfo))
506     return false;
507 
508   return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
509                            ThisReturnReg);
510 }
511 
512 static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
513   if (Flags.isSExt())
514     return TargetOpcode::G_SEXT;
515   if (Flags.isZExt())
516     return TargetOpcode::G_ZEXT;
517   return TargetOpcode::G_ANYEXT;
518 }
519 
520 bool CallLowering::determineAssignments(ValueAssigner &Assigner,
521                                         SmallVectorImpl<ArgInfo> &Args,
522                                         CCState &CCInfo) const {
523   LLVMContext &Ctx = CCInfo.getContext();
524   const CallingConv::ID CallConv = CCInfo.getCallingConv();
525 
526   unsigned NumArgs = Args.size();
527   for (unsigned i = 0; i != NumArgs; ++i) {
528     EVT CurVT = EVT::getEVT(Args[i].Ty);
529 
530     MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);
531 
532     // If we need to split the type over multiple regs, check it's a scenario
533     // we currently support.
534     unsigned NumParts =
535         TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);
536 
537     if (NumParts == 1) {
538       // Try to use the register type if we couldn't assign the VT.
539       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
540                              Args[i].Flags[0], CCInfo))
541         return false;
542       continue;
543     }
544 
545     // For incoming arguments (physregs to vregs), we could have values in
546     // physregs (or memlocs) which we want to extract and copy to vregs.
547     // During this, we might have to deal with the LLT being split across
548     // multiple regs, so we have to record this information for later.
549     //
550     // If we have outgoing args, then we have the opposite case. We have a
551     // vreg with an LLT which we want to assign to a physical location, and
552     // we might have to record that the value has to be split later.
553 
554     // We're handling an incoming arg which is split over multiple regs.
555     // E.g. passing an s128 on AArch64.
556     ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
557     Args[i].Flags.clear();
558 
559     for (unsigned Part = 0; Part < NumParts; ++Part) {
560       ISD::ArgFlagsTy Flags = OrigFlags;
561       if (Part == 0) {
562         Flags.setSplit();
563       } else {
564         Flags.setOrigAlign(Align(1));
565         if (Part == NumParts - 1)
566           Flags.setSplitEnd();
567       }
568 
569       if (!Assigner.isIncomingArgumentHandler()) {
        // TODO: Also check if there is a valid extension that preserves the
        // bits. However, this call lowering currently doesn't support
        // non-exact split parts, so that can't be tested.
573         if (OrigFlags.isReturned() &&
574             (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
575           Flags.setReturned(false);
576         }
577       }
578 
579       Args[i].Flags.push_back(Flags);
580       if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
581                              Args[i].Flags[Part], CCInfo)) {
582         // Still couldn't assign this smaller part type for some reason.
583         return false;
584       }
585     }
586   }
587 
588   return true;
589 }
590 
591 bool CallLowering::handleAssignments(ValueHandler &Handler,
592                                      SmallVectorImpl<ArgInfo> &Args,
593                                      CCState &CCInfo,
594                                      SmallVectorImpl<CCValAssign> &ArgLocs,
595                                      MachineIRBuilder &MIRBuilder,
596                                      Register ThisReturnReg) const {
597   MachineFunction &MF = MIRBuilder.getMF();
598   MachineRegisterInfo &MRI = MF.getRegInfo();
599   const Function &F = MF.getFunction();
600   const DataLayout &DL = F.getParent()->getDataLayout();
601 
602   const unsigned NumArgs = Args.size();
603 
604   for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
605     assert(j < ArgLocs.size() && "Skipped too many arg locs");
606     CCValAssign &VA = ArgLocs[j];
607     assert(VA.getValNo() == i && "Location doesn't correspond to current arg");
608 
609     if (VA.needsCustom()) {
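      // Custom assignments can span multiple ArgLocs; the handler returns how
      // far to advance j (the loop increment covers one more location).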
610       unsigned NumArgRegs =
611           Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
612       if (!NumArgRegs)
613         return false;
614       j += NumArgRegs;
615       continue;
616     }
617 
618     const MVT ValVT = VA.getValVT();
619     const MVT LocVT = VA.getLocVT();
620 
621     const LLT LocTy(LocVT);
622     const LLT ValTy(ValVT);
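    // Incoming handlers receive values in their ABI location type; outgoing
    // handlers work on the value type and extend to the location type when
    // assigning registers.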
623     const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
624     const EVT OrigVT = EVT::getEVT(Args[i].Ty);
625     const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);
626 
627     // Expected to be multiple regs for a single incoming arg.
628     // There should be Regs.size() ArgLocs per argument.
629     // This should be the same as getNumRegistersForCallingConv
630     const unsigned NumParts = Args[i].Flags.size();
631 
632     // Now split the registers into the assigned types.
633     Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());
634 
635     if (NumParts != 1 || NewLLT != OrigTy) {
636       // If we can't directly assign the register, we need one or more
637       // intermediate values.
638       Args[i].Regs.resize(NumParts);
639 
640       // For each split register, create and assign a vreg that will store
641       // the incoming component of the larger value. These will later be
642       // merged to form the final vreg.
643       for (unsigned Part = 0; Part < NumParts; ++Part)
644         Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
645     }
646 
647     assert((j + (NumParts - 1)) < ArgLocs.size() &&
648            "Too many regs for number of args");
649 
650     // Coerce into outgoing value types before register assignment.
651     if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
652       assert(Args[i].OrigRegs.size() == 1);
653       buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
654                       ValTy, extendOpFromFlags(Args[i].Flags[0]));
655     }
656 
657     for (unsigned Part = 0; Part < NumParts; ++Part) {
658       Register ArgReg = Args[i].Regs[Part];
659       // There should be Regs.size() ArgLocs per argument.
660       VA = ArgLocs[j + Part];
661       const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];
662 
663       if (VA.isMemLoc() && !Flags.isByVal()) {
664         // Individual pieces may have been spilled to the stack and others
665         // passed in registers.
666 
667         // TODO: The memory size may be larger than the value we need to
668         // store. We may need to adjust the offset for big endian targets.
669         uint64_t MemSize = Handler.getStackValueStoreSize(DL, VA);
670 
671         MachinePointerInfo MPO;
672         Register StackAddr =
673             Handler.getStackAddress(MemSize, VA.getLocMemOffset(), MPO, Flags);
674 
675         Handler.assignValueToAddress(Args[i], Part, StackAddr, MemSize, MPO,
676                                      VA);
677         continue;
678       }
679 
680       if (VA.isMemLoc() && Flags.isByVal()) {
681         assert(Args[i].Regs.size() == 1 &&
682                "didn't expect split byval pointer");
683 
684         if (Handler.isIncomingArgumentHandler()) {
685           // We just need to copy the frame index value to the pointer.
686           MachinePointerInfo MPO;
687           Register StackAddr = Handler.getStackAddress(
688               Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
689           MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
690         } else {
691           // For outgoing byval arguments, insert the implicit copy byval
692           // implies, such that writes in the callee do not modify the caller's
693           // value.
694           uint64_t MemSize = Flags.getByValSize();
695           int64_t Offset = VA.getLocMemOffset();
696 
697           MachinePointerInfo DstMPO;
698           Register StackAddr =
699               Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);
700 
701           MachinePointerInfo SrcMPO(Args[i].OrigValue);
702           if (!Args[i].OrigValue) {
703             // We still need to accurately track the stack address space if we
704             // don't know the underlying value.
705             const LLT PtrTy = MRI.getType(StackAddr);
706             SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
707           }
708 
709           Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
710                                     inferAlignFromPtrInfo(MF, DstMPO));
711 
712           Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
713                                     inferAlignFromPtrInfo(MF, SrcMPO));
714 
715           Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
716                                      DstMPO, DstAlign, SrcMPO, SrcAlign,
717                                      MemSize, VA);
718         }
719         continue;
720       }
721 
722       assert(!VA.needsCustom() && "custom loc should have been handled already");
723 
724       if (i == 0 && ThisReturnReg.isValid() &&
725           Handler.isIncomingArgumentHandler() &&
726           isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnReg, VA);
728         continue;
729       }
730 
731       Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
732     }
733 
734     // Now that all pieces have been assigned, re-pack the register typed values
735     // into the original value typed registers.
736     if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
737       // Merge the split registers into the expected larger result vregs of
738       // the original call.
739       buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
740                         LocTy, Args[i].Flags[0]);
741     }
742 
743     j += NumParts - 1;
744   }
745 
746   return true;
747 }
748 
749 void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
750                                    ArrayRef<Register> VRegs, Register DemoteReg,
751                                    int FI) const {
752   MachineFunction &MF = MIRBuilder.getMF();
753   MachineRegisterInfo &MRI = MF.getRegInfo();
754   const DataLayout &DL = MF.getDataLayout();
755 
756   SmallVector<EVT, 4> SplitVTs;
757   SmallVector<uint64_t, 4> Offsets;
758   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
759 
760   assert(VRegs.size() == SplitVTs.size());
761 
762   unsigned NumValues = SplitVTs.size();
763   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
764   Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
765   LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
766 
767   MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
768 
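  // Load each split value from its byte offset within the sret stack slot.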
769   for (unsigned I = 0; I < NumValues; ++I) {
770     Register Addr;
771     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
772     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
773                                         MRI.getType(VRegs[I]).getSizeInBytes(),
774                                         commonAlignment(BaseAlign, Offsets[I]));
775     MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
776   }
777 }
778 
779 void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
780                                     ArrayRef<Register> VRegs,
781                                     Register DemoteReg) const {
782   MachineFunction &MF = MIRBuilder.getMF();
783   MachineRegisterInfo &MRI = MF.getRegInfo();
784   const DataLayout &DL = MF.getDataLayout();
785 
786   SmallVector<EVT, 4> SplitVTs;
787   SmallVector<uint64_t, 4> Offsets;
788   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
789 
790   assert(VRegs.size() == SplitVTs.size());
791 
792   unsigned NumValues = SplitVTs.size();
793   Align BaseAlign = DL.getPrefTypeAlign(RetTy);
794   unsigned AS = DL.getAllocaAddrSpace();
795   LLT OffsetLLTy =
796       getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
797 
798   MachinePointerInfo PtrInfo(AS);
799 
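  // Store each split value at its byte offset within the caller-provided
  // buffer.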
800   for (unsigned I = 0; I < NumValues; ++I) {
801     Register Addr;
802     MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
803     auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
804                                         MRI.getType(VRegs[I]).getSizeInBytes(),
805                                         commonAlignment(BaseAlign, Offsets[I]));
806     MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
807   }
808 }
809 
810 void CallLowering::insertSRetIncomingArgument(
811     const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
812     MachineRegisterInfo &MRI, const DataLayout &DL) const {
813   unsigned AS = DL.getAllocaAddrSpace();
814   DemoteReg = MRI.createGenericVirtualRegister(
815       LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
816 
817   Type *PtrTy = PointerType::get(F.getReturnType(), AS);
818 
819   SmallVector<EVT, 1> ValueVTs;
820   ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
821 
822   // NOTE: Assume that a pointer won't get split into more than one VT.
823   assert(ValueVTs.size() == 1);
824 
825   ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()));
826   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
827   DemoteArg.Flags[0].setSRet();
828   SplitArgs.insert(SplitArgs.begin(), DemoteArg);
829 }
830 
831 void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
832                                               const CallBase &CB,
833                                               CallLoweringInfo &Info) const {
834   const DataLayout &DL = MIRBuilder.getDataLayout();
835   Type *RetTy = CB.getType();
836   unsigned AS = DL.getAllocaAddrSpace();
837   LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
838 
839   int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
840       DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
841 
842   Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
843   ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS));
844   setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
845   DemoteArg.Flags[0].setSRet();
846 
847   Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
848   Info.DemoteStackIndex = FI;
849   Info.DemoteRegister = DemoteReg;
850 }
851 
852 bool CallLowering::checkReturn(CCState &CCInfo,
853                                SmallVectorImpl<BaseArgInfo> &Outs,
854                                CCAssignFn *Fn) const {
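  // CCAssignFn returns true when it fails to assign a location, in which case
  // the return cannot be lowered directly and needs sret demotion.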
855   for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
856     MVT VT = MVT::getVT(Outs[I].Ty);
857     if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
858       return false;
859   }
860   return true;
861 }
862 
863 void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
864                                  AttributeList Attrs,
865                                  SmallVectorImpl<BaseArgInfo> &Outs,
866                                  const DataLayout &DL) const {
867   LLVMContext &Context = RetTy->getContext();
868   ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
869 
870   SmallVector<EVT, 4> SplitVTs;
871   ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
872   addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
873 
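  // Each value type may itself be split into several register-sized parts by
  // the calling convention; emit one BaseArgInfo per part.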
874   for (EVT VT : SplitVTs) {
875     unsigned NumParts =
876         TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
877     MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
878     Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
879 
880     for (unsigned I = 0; I < NumParts; ++I) {
881       Outs.emplace_back(PartTy, Flags);
882     }
883   }
884 }
885 
886 bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
887   const auto &F = MF.getFunction();
888   Type *ReturnType = F.getReturnType();
889   CallingConv::ID CallConv = F.getCallingConv();
890 
891   SmallVector<BaseArgInfo, 4> SplitArgs;
892   getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
893                 MF.getDataLayout());
894   return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
895 }
896 
897 bool CallLowering::parametersInCSRMatch(
898     const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
899     const SmallVectorImpl<CCValAssign> &OutLocs,
900     const SmallVectorImpl<ArgInfo> &OutArgs) const {
901   for (unsigned i = 0; i < OutLocs.size(); ++i) {
902     auto &ArgLoc = OutLocs[i];
903     // If it's not a register, it's fine.
904     if (!ArgLoc.isRegLoc())
905       continue;
906 
907     MCRegister PhysReg = ArgLoc.getLocReg();
908 
909     // Only look at callee-saved registers.
910     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
911       continue;
912 
913     LLVM_DEBUG(
914         dbgs()
915         << "... Call has an argument passed in a callee-saved register.\n");
916 
917     // Check if it was copied from.
918     const ArgInfo &OutInfo = OutArgs[i];
919 
920     if (OutInfo.Regs.size() > 1) {
921       LLVM_DEBUG(
922           dbgs() << "... Cannot handle arguments in multiple registers.\n");
923       return false;
924     }
925 
926     // Check if we copy the register, walking through copies from virtual
927     // registers. Note that getDefIgnoringCopies does not ignore copies from
928     // physical registers.
929     MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
930     if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
931       LLVM_DEBUG(
932           dbgs()
933           << "... Parameter was not copied into a VReg, cannot tail call.\n");
934       return false;
935     }
936 
937     // Got a copy. Verify that it's the same as the register we want.
938     Register CopyRHS = RegDef->getOperand(1).getReg();
939     if (CopyRHS != PhysReg) {
940       LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
941                            "VReg, cannot tail call.\n");
942       return false;
943     }
944   }
945 
946   return true;
947 }
948 
949 bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
950                                      MachineFunction &MF,
951                                      SmallVectorImpl<ArgInfo> &InArgs,
952                                      ValueAssigner &CalleeAssigner,
953                                      ValueAssigner &CallerAssigner) const {
954   const Function &F = MF.getFunction();
955   CallingConv::ID CalleeCC = Info.CallConv;
956   CallingConv::ID CallerCC = F.getCallingConv();
957 
958   if (CallerCC == CalleeCC)
959     return true;
960 
961   SmallVector<CCValAssign, 16> ArgLocs1;
962   CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
963   if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
964     return false;
965 
966   SmallVector<CCValAssign, 16> ArgLocs2;
967   CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
968   if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
969     return false;
970 
971   // We need the argument locations to match up exactly. If there's more in
972   // one than the other, then we are done.
973   if (ArgLocs1.size() != ArgLocs2.size())
974     return false;
975 
976   // Make sure that each location is passed in exactly the same way.
977   for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
978     const CCValAssign &Loc1 = ArgLocs1[i];
979     const CCValAssign &Loc2 = ArgLocs2[i];
980 
981     // We need both of them to be the same. So if one is a register and one
982     // isn't, we're done.
983     if (Loc1.isRegLoc() != Loc2.isRegLoc())
984       return false;
985 
986     if (Loc1.isRegLoc()) {
987       // If they don't have the same register location, we're done.
988       if (Loc1.getLocReg() != Loc2.getLocReg())
989         return false;
990 
991       // They matched, so we can move to the next ArgLoc.
992       continue;
993     }
994 
995     // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
996     if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
997       return false;
998   }
999 
1000   return true;
1001 }
1002 
1003 uint64_t CallLowering::ValueHandler::getStackValueStoreSize(
1004     const DataLayout &DL, const CCValAssign &VA) const {
1005   const EVT ValVT = VA.getValVT();
1006   if (ValVT != MVT::iPTR)
1007     return ValVT.getStoreSize();
1008 
  // FIXME: We need to get the correct pointer address space.
1010   return DL.getPointerSize();
1011 }
1012 
1013 void CallLowering::ValueHandler::copyArgumentMemory(
1014     const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
1015     const MachinePointerInfo &DstPtrInfo, Align DstAlign,
1016     const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
1017     CCValAssign &VA) const {
1018   MachineFunction &MF = MIRBuilder.getMF();
1019   MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
1020       SrcPtrInfo,
1021       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
1022       SrcAlign);
1023 
1024   MachineMemOperand *DstMMO = MF.getMachineMemOperand(
1025       DstPtrInfo,
1026       MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
1027       MemSize, DstAlign);
1028 
1029   const LLT PtrTy = MRI.getType(DstPtr);
1030   const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
1031 
1032   auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
1033   MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
1034 }
1035 
1036 Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1037                                                     CCValAssign &VA,
1038                                                     unsigned MaxSizeBits) {
1039   LLT LocTy{VA.getLocVT()};
1040   LLT ValTy{VA.getValVT()};
1041 
1042   if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
1043     return ValReg;
1044 
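  // If the caller capped the extension width, extend only up to MaxSizeBits.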
1045   if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1046     if (MaxSizeBits <= ValTy.getSizeInBits())
1047       return ValReg;
1048     LocTy = LLT::scalar(MaxSizeBits);
1049   }
1050 
1051   switch (VA.getLocInfo()) {
1052   default: break;
1053   case CCValAssign::Full:
1054   case CCValAssign::BCvt:
1055     // FIXME: bitconverting between vector types may or may not be a
1056     // nop in big-endian situations.
1057     return ValReg;
1058   case CCValAssign::AExt: {
1059     auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1060     return MIB.getReg(0);
1061   }
1062   case CCValAssign::SExt: {
1063     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1064     MIRBuilder.buildSExt(NewReg, ValReg);
1065     return NewReg;
1066   }
1067   case CCValAssign::ZExt: {
1068     Register NewReg = MRI.createGenericVirtualRegister(LocTy);
1069     MIRBuilder.buildZExt(NewReg, ValReg);
1070     return NewReg;
1071   }
1072   }
1073   llvm_unreachable("unable to extend register");
1074 }
1075 
1076 void CallLowering::ValueAssigner::anchor() {}
1077 
1078 Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
1079                                                                 Register SrcReg,
1080                                                                 LLT NarrowTy) {
1081   switch (VA.getLocInfo()) {
1082   case CCValAssign::LocInfo::ZExt: {
1083     return MIRBuilder
1084         .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1085                          NarrowTy.getScalarSizeInBits())
1086         .getReg(0);
1087   }
1088   case CCValAssign::LocInfo::SExt: {
1089     return MIRBuilder
1090         .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
1091                          NarrowTy.getScalarSizeInBits())
1092         .getReg(0);
1094   }
1095   default:
1096     return SrcReg;
1097   }
1098 }
1099 
1100 /// Check if we can use a basic COPY instruction between the two types.
1101 ///
1102 /// We're currently building on top of the infrastructure using MVT, which loses
1103 /// pointer information in the CCValAssign. We accept copies from physical
/// registers that have been reported as integers if the destination is a
/// pointer LLT of the same size.
1106 static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1107   if (SrcTy == DstTy)
1108     return true;
1109 
1110   if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1111     return false;
1112 
1113   SrcTy = SrcTy.getScalarType();
1114   DstTy = DstTy.getScalarType();
1115 
  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
1118 }
1119 
1120 void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
1121                                                           Register PhysReg,
1122                                                           CCValAssign &VA) {
1123   const MVT LocVT = VA.getLocVT();
1124   const LLT LocTy(LocVT);
1125   const LLT RegTy = MRI.getType(ValVReg);
1126 
1127   if (isCopyCompatibleType(RegTy, LocTy)) {
1128     MIRBuilder.buildCopy(ValVReg, PhysReg);
1129     return;
1130   }
1131 
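  // The types differ beyond a plain copy: copy out the full location, attach
  // an extension hint if the ABI promises one, and truncate to the value type.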
1132   auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1133   auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
1134   MIRBuilder.buildTrunc(ValVReg, Hint);
1135 }
1136