//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}

/// Helper function which updates \p Flags when \p AttrFn returns true.
static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
                    const std::function<bool(Attribute::AttrKind)> &AttrFn) {
  if (AttrFn(Attribute::SExt))
    Flags.setSExt();
  if (AttrFn(Attribute::ZExt))
    Flags.setZExt();
  if (AttrFn(Attribute::InReg))
    Flags.setInReg();
  if (AttrFn(Attribute::StructRet))
    Flags.setSRet();
  if (AttrFn(Attribute::Nest))
    Flags.setNest();
  if (AttrFn(Attribute::ByVal))
    Flags.setByVal();
  if (AttrFn(Attribute::Preallocated))
    Flags.setPreallocated();
  if (AttrFn(Attribute::InAlloca))
    Flags.setInAlloca();
  if (AttrFn(Attribute::Returned))
    Flags.setReturned();
  if (AttrFn(Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (AttrFn(Attribute::SwiftAsync))
    Flags.setSwiftAsync();
  if (AttrFn(Attribute::SwiftError))
    Flags.setSwiftError();
}

ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
                                                     unsigned ArgIdx) const {
  ISD::ArgFlagsTy Flags;
  addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
    return Call.paramHasAttr(ArgIdx, Attr);
  });
  return Flags;
}

void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                             const AttributeList &Attrs,
                                             unsigned OpIdx) const {
  addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
    return Attrs.hasAttribute(OpIdx, Attr);
  });
}

bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
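  // A call can only be lowered as a tail call if the IR marks it as one, it
  // sits in a valid tail call position, and the caller hasn't opted out via
  // the "disable-tail-calls" function attribute.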
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);

    // If we have an explicit sret argument that is an Instruction (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, RetTy, ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  return lowerCall(MIRBuilder, Info);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addArgFlagsFromAttributes(Flags, Attrs, OpIdx);

  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    assert(OpIdx >= AttributeList::FirstArgIndex);
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from the frontend. The backend
    // will guess if this info is not there, but there are cases it cannot
    // get right.
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
    else if ((ParamAlign =
                  FuncInfo.getParamAlign(OpIdx - AttributeList::FirstArgIndex)))
      MemAlign = *ParamAlign;
    else
      MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
  } else if (OpIdx >= AttributeList::FirstArgIndex) {
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;

void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv) const {
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.empty())
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags[0], OrigArg.IsFixed,
                           OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false, DL);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                           OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}

void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy != PartLLT) {
    // e.g. A <3 x s16> value was split to <2 x s16>
    // %register_value0:_(<2 x s16>)
    // %register_value1:_(<2 x s16>)
    // %undef:_(<2 x s16>) = G_IMPLICIT_DEF
    // %concat:_(<6 x s16>) = G_CONCAT_VECTORS %reg_value0, %reg_value1, %undef
    // %dst_reg:_(<3 x s16>), %dead:_(<3 x s16>) = G_UNMERGE_VALUES %concat
    const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
    Register Undef = B.buildUndef(PartLLT).getReg(0);

    // Build vector of undefs.
    SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

    // Replace the first sources with the real registers.
    std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());
    UnmergeSrcReg = B.buildConcatVectors(LCMTy, WidenedSrcs).getReg(0);
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}

/// Create a sequence of instructions to combine pieces split into register
/// typed values to the original IR value. \p OrigRegs contains the destination
/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
                              const ISD::ArgFlagsTy Flags) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register, and just
    // directly assigned here.
    assert(OrigRegs[0] == Regs[0]);
    return;
  }

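  // Identical total size in a single register on both sides: one bitcast
  // recovers the original type (e.g. an s64 value that was passed as
  // <2 x s32>).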
  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
      Regs.size() == 1) {
    B.buildBitcast(OrigRegs[0], Regs[0]);
    return;
  }

  // A vector PartLLT needs extending to LLTy's element size.
  // E.g. <2 x s64> = G_SEXT <2 x s32>.
  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
      (!PartLLT.isVector() ||
       PartLLT.getNumElements() == LLTy.getNumElements()) &&
      OrigRegs.size() == 1 && Regs.size() == 1) {
    Register SrcReg = Regs[0];

    LLT LocTy = MRI.getType(SrcReg);

    if (Flags.isSExt()) {
      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    } else if (Flags.isZExt()) {
      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    }

    B.buildTrunc(OrigRegs[0], SrcReg);
    return;
  }

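  // Scalar split into smaller scalar parts: merge them back together,
  // truncating from a wider merge if the parts over-cover the original
  // (e.g. an s48 value passed as two s32 parts).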
  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    SmallVector<Register> CastRegs(Regs.begin(), Regs.end());

    // If PartLLT is a mismatched vector in both number of elements and element
    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
    // have the same elt type, i.e. v4s32.
    if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
        Regs.size() == 1) {
      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
                      .changeElementCount(PartLLT.getElementCount() * 2);
      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
      PartLLT = NewTy;
    }

    if (LLTy.getScalarType() == PartLLT.getElementType()) {
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    } else {
      unsigned I = 0;
      LLT GCDTy = getGCDType(LLTy, PartLLT);

      // We are both splitting a vector, and bitcasting its element types. Cast
      // the source pieces into the appropriate number of pieces with the result
      // element type.
      for (Register SrcReg : CastRegs)
        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    }

    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
/// contain the type of scalar value extension if necessary.
///
/// This is used for outgoing values (vregs to physregs)
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            Register SrcReg, LLT SrcTy, LLT PartTy,
                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const unsigned PartSize = PartTy.getSizeInBits();

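  // The parts are a uniformly widened form of the source in each lane
  // (e.g. s16 extended to s32, or <2 x s16> to <2 x s32>): a single extend
  // of the requested kind suffices.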
  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this is already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT LCMTy = getLCMType(SrcTy, PartTy);

  const unsigned LCMSize = LCMTy.getSizeInBits();
  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  Register UnmergeSrc = SrcReg;
  if (LCMSize != SrcSize) {
    // Widen to the common type.
    Register Undef = B.buildUndef(SrcTy).getReg(0);
    SmallVector<Register, 8> MergeParts(1, SrcReg);
    for (unsigned Size = SrcSize; Size != LCMSize; Size += SrcSize)
      MergeParts.push_back(Undef);

    UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
  }

  // Unmerge to the original registers and pad with dead defs.
  SmallVector<Register, 8> UnmergeResults(DstRegs.begin(), DstRegs.end());
  for (unsigned Size = DstSize * DstRegs.size(); Size != LCMSize;
       Size += DstSize) {
    UnmergeResults.push_back(MRI.createGenericVirtualRegister(DstTy));
  }

  B.buildUnmerge(UnmergeResults, UnmergeSrc);
}

bool CallLowering::determineAndHandleAssignments(
    ValueHandler &Handler, ValueAssigner &Assigner,
    SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
    CallingConv::ID CallConv, bool IsVarArg, Register ThisReturnReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;

  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, Args, CCInfo))
    return false;

  return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
                           ThisReturnReg);
}

static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
  if (Flags.isSExt())
    return TargetOpcode::G_SEXT;
  if (Flags.isZExt())
    return TargetOpcode::G_ZEXT;
  return TargetOpcode::G_ANYEXT;
}

bool CallLowering::determineAssignments(ValueAssigner &Assigner,
                                        SmallVectorImpl<ArgInfo> &Args,
                                        CCState &CCInfo) const {
  LLVMContext &Ctx = CCInfo.getContext();
  const CallingConv::ID CallConv = CCInfo.getCallingConv();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);

    MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[0], CCInfo))
        return false;
      continue;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.

    // We're handling an incoming arg which is split over multiple regs.
    // E.g. passing an s128 on AArch64.
    ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
    Args[i].Flags.clear();

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        Flags.setSplit();
      } else {
        Flags.setOrigAlign(Align(1));
        if (Part == NumParts - 1)
          Flags.setSplitEnd();
      }

      if (!Assigner.isIncomingArgumentHandler()) {
        // TODO: Also check if there is a valid extension that preserves the
        // bits. However currently this call lowering doesn't support non-exact
        // split parts, so that can't be tested.
        if (OrigFlags.isReturned() &&
            (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
          Flags.setReturned(false);
        }
      }

      Args[i].Flags.push_back(Flags);
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[Part], CCInfo)) {
        // Still couldn't assign this smaller part type for some reason.
        return false;
      }
    }
  }

  return true;
}

bool CallLowering::handleAssignments(ValueHandler &Handler,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     Register ThisReturnReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  const unsigned NumArgs = Args.size();

  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy(LocVT);
    const LLT ValTy(ValVT);
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = EVT::getEVT(Args[i].Ty);
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the registers into the assigned types.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // For each split register, create and assign a vreg that will store
      // the incoming component of the larger value. These will later be
      // merged to form the final vreg.
      for (unsigned Part = 0; Part < NumParts; ++Part)
        Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      VA = ArgLocs[j + Part];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // TODO: The memory size may be larger than the value we need to
        // store. We may need to adjust the offset for big endian targets.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA);

        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(
            MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);

        Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
        continue;
      }

      if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 &&
               "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
        continue;
      }

      assert(!VA.needsCustom() && "custom loc should have been handled already");

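      // 'This return' optimization: if the target told us the callee returns
      // its first argument (e.g. a 'this' pointer), read the first result
      // directly from the register that argument was passed in.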
      if (i == 0 && ThisReturnReg.isValid() &&
          Handler.isIncomingArgumentHandler() &&
          isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
        continue;
      }

      Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }

  return true;
}

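/// Load a demoted struct-return value back out of stack slot \p FI through the
/// hidden sret pointer \p DemoteReg, one load per split piece of the value.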
void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                   ArrayRef<Register> VRegs, Register DemoteReg,
                                   int FI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
  LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                        MRI.getType(VRegs[I]).getSizeInBytes(),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
  }
}

void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                                    ArrayRef<Register> VRegs,
                                    Register DemoteReg) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const DataLayout &DL = MF.getDataLayout();

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);

  assert(VRegs.size() == SplitVTs.size());

  unsigned NumValues = SplitVTs.size();
  Align BaseAlign = DL.getPrefTypeAlign(RetTy);
  unsigned AS = DL.getAllocaAddrSpace();
  LLT OffsetLLTy =
      getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);

  MachinePointerInfo PtrInfo(AS);

  for (unsigned I = 0; I < NumValues; ++I) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
    auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                        MRI.getType(VRegs[I]).getSizeInBytes(),
                                        commonAlignment(BaseAlign, Offsets[I]));
    MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
  }
}

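/// Set up sret demotion for a function's incoming return value: create the
/// vreg for the hidden sret pointer parameter and prepend it to \p SplitArgs.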
void CallLowering::insertSRetIncomingArgument(
    const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
    MachineRegisterInfo &MRI, const DataLayout &DL) const {
  unsigned AS = DL.getAllocaAddrSpace();
  DemoteReg = MRI.createGenericVirtualRegister(
      LLT::pointer(AS, DL.getPointerSizeInBits(AS)));

  Type *PtrTy = PointerType::get(F.getReturnType(), AS);

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);

  // NOTE: Assume that a pointer won't get split into more than one VT.
  assert(ValueVTs.size() == 1);

  ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()));
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
  DemoteArg.Flags[0].setSRet();
  SplitArgs.insert(SplitArgs.begin(), DemoteArg);
}

void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                              const CallBase &CB,
                                              CallLoweringInfo &Info) const {
  const DataLayout &DL = MIRBuilder.getDataLayout();
  Type *RetTy = CB.getType();
  unsigned AS = DL.getAllocaAddrSpace();
  LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));

  int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
      DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);

  Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
  ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS));
  setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
  DemoteArg.Flags[0].setSRet();

  Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
  Info.DemoteStackIndex = FI;
  Info.DemoteRegister = DemoteReg;
}

bool CallLowering::checkReturn(CCState &CCInfo,
                               SmallVectorImpl<BaseArgInfo> &Outs,
                               CCAssignFn *Fn) const {
  for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
    MVT VT = MVT::getVT(Outs[I].Ty);
    if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
      return false;
  }
  return true;
}

void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
                                 AttributeList Attrs,
                                 SmallVectorImpl<BaseArgInfo> &Outs,
                                 const DataLayout &DL) const {
  LLVMContext &Context = RetTy->getContext();
  ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
  addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);

  for (EVT VT : SplitVTs) {
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
    MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
    Type *PartTy = EVT(RegVT).getTypeForEVT(Context);

    for (unsigned I = 0; I < NumParts; ++I) {
      Outs.emplace_back(PartTy, Flags);
    }
  }
}

bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
  const auto &F = MF.getFunction();
  Type *ReturnType = F.getReturnType();
  CallingConv::ID CallConv = F.getCallingConv();

  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
                MF.getDataLayout());
  return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
}

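// Check that each outgoing argument assigned to a callee-saved register is a
// plain copy of that same register's incoming value; anything else would
// clobber state a tail call is obliged to preserve.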
bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

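// Determine whether the callee's and caller's calling conventions assign the
// call's result values to identical locations; mismatched result locations
// rule out tail calling between the two conventions.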
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     ValueAssigner &CalleeAssigner,
                                     ValueAssigner &CallerAssigner) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
  if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
  if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}

LLT CallLowering::ValueHandler::getStackValueStoreType(
    const DataLayout &DL, const CCValAssign &VA) const {
  const MVT ValVT = VA.getValVT();
  if (ValVT != MVT::iPTR)
    return LLT(ValVT);

  // FIXME: We need to get the correct pointer address space.
  return LLT::pointer(0, DL.getPointerSize(0));
}

void CallLowering::ValueHandler::copyArgumentMemory(
    const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
    const MachinePointerInfo &DstPtrInfo, Align DstAlign,
    const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
    CCValAssign &VA) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
      SrcPtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
      SrcAlign);

  MachineMemOperand *DstMMO = MF.getMachineMemOperand(
      DstPtrInfo,
      MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
      MemSize, DstAlign);

  const LLT PtrTy = MRI.getType(DstPtr);
  const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());

  auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
  MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
}

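/// Extend \p ValReg to the location type recorded in \p VA using the
/// assignment's extension kind. A nonzero \p MaxSizeBits caps the width a
/// scalar extension may reach.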
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy{VA.getValVT()};

  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueAssigner::anchor() {}

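/// Wrap an incoming physreg copy in a G_ASSERT_ZEXT/G_ASSERT_SEXT hint so
/// later passes know the state of the bits beyond \p NarrowTy's width.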
Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
                                                                Register SrcReg,
                                                                LLT NarrowTy) {
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::ZExt: {
    return MIRBuilder
        .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  case CCValAssign::LocInfo::SExt: {
    return MIRBuilder
        .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
                         NarrowTy.getScalarSizeInBits())
        .getReg(0);
  }
  default:
    return SrcReg;
  }
}

/// Check if we can use a basic COPY instruction between the two types.
///
/// We're currently building on top of the infrastructure using MVT, which loses
/// pointer information in the CCValAssign. We accept copies from physical
/// registers that have been reported as integers if it's to an equivalent sized
/// pointer LLT.
static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
  if (SrcTy == DstTy)
    return true;

  if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
    return false;

  SrcTy = SrcTy.getScalarType();
  DstTy = DstTy.getScalarType();

  return (SrcTy.isPointer() && DstTy.isScalar()) ||
         (DstTy.isPointer() && SrcTy.isScalar());
}

void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
                                                          Register PhysReg,
                                                          CCValAssign &VA) {
  const MVT LocVT = VA.getLocVT();
  const LLT LocTy(LocVT);
  const LLT RegTy = MRI.getType(ValVReg);

  if (isCopyCompatibleType(RegTy, LocTy)) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    return;
  }

  auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
  auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
  MIRBuilder.buildTrunc(ValVReg, Hint);
}