1fe5f89baSTim Northover //===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
2fe5f89baSTim Northover //
32946cd70SChandler Carruth // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
42946cd70SChandler Carruth // See https://llvm.org/LICENSE.txt for license information.
52946cd70SChandler Carruth // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6fe5f89baSTim Northover //
7fe5f89baSTim Northover //===----------------------------------------------------------------------===//
8fe5f89baSTim Northover ///
9fe5f89baSTim Northover /// \file
10fe5f89baSTim Northover /// This file implements some simple delegations needed for call lowering.
11fe5f89baSTim Northover ///
12fe5f89baSTim Northover //===----------------------------------------------------------------------===//
13fe5f89baSTim Northover
14ed98c1b3Sserge-sans-paille #include "llvm/CodeGen/GlobalISel/CallLowering.h"
15c3dbe239SDiana Picus #include "llvm/CodeGen/Analysis.h"
168bde5e58SAmara Emerson #include "llvm/CodeGen/CallingConvLower.h"
17a278250bSNico Weber #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
18ed98c1b3Sserge-sans-paille #include "llvm/CodeGen/GlobalISel/Utils.h"
19989f1c72Sserge-sans-paille #include "llvm/CodeGen/MachineFrameInfo.h"
20fe5f89baSTim Northover #include "llvm/CodeGen/MachineOperand.h"
212d9adbf5SDiana Picus #include "llvm/CodeGen/MachineRegisterInfo.h"
22b3bde2eaSDavid Blaikie #include "llvm/CodeGen/TargetLowering.h"
239a467183STim Northover #include "llvm/IR/DataLayout.h"
247b8d3eb9SMark Lacey #include "llvm/IR/LLVMContext.h"
259a467183STim Northover #include "llvm/IR/Module.h"
26fe0006c8SSimon Pilgrim #include "llvm/Target/TargetMachine.h"
27fe5f89baSTim Northover
282b523f81SAmara Emerson #define DEBUG_TYPE "call-lowering"
292b523f81SAmara Emerson
30fe5f89baSTim Northover using namespace llvm;
31fe5f89baSTim Northover
// Out-of-line virtual method definition; serves as the key function that
// anchors CallLowering's vtable to this translation unit.
void CallLowering::anchor() {}
33a87b70d1SRichard Trieu
34bf36e902SJessica Paquette /// Helper function which updates \p Flags when \p AttrFn returns true.
35bf36e902SJessica Paquette static void
addFlagsUsingAttrFn(ISD::ArgFlagsTy & Flags,const std::function<bool (Attribute::AttrKind)> & AttrFn)36bf36e902SJessica Paquette addFlagsUsingAttrFn(ISD::ArgFlagsTy &Flags,
37bf36e902SJessica Paquette const std::function<bool(Attribute::AttrKind)> &AttrFn) {
38bf36e902SJessica Paquette if (AttrFn(Attribute::SExt))
39bf36e902SJessica Paquette Flags.setSExt();
40bf36e902SJessica Paquette if (AttrFn(Attribute::ZExt))
41bf36e902SJessica Paquette Flags.setZExt();
42bf36e902SJessica Paquette if (AttrFn(Attribute::InReg))
43bf36e902SJessica Paquette Flags.setInReg();
44bf36e902SJessica Paquette if (AttrFn(Attribute::StructRet))
45bf36e902SJessica Paquette Flags.setSRet();
46bf36e902SJessica Paquette if (AttrFn(Attribute::Nest))
47bf36e902SJessica Paquette Flags.setNest();
48bf36e902SJessica Paquette if (AttrFn(Attribute::ByVal))
49bf36e902SJessica Paquette Flags.setByVal();
50bf36e902SJessica Paquette if (AttrFn(Attribute::Preallocated))
51bf36e902SJessica Paquette Flags.setPreallocated();
52bf36e902SJessica Paquette if (AttrFn(Attribute::InAlloca))
53bf36e902SJessica Paquette Flags.setInAlloca();
54bf36e902SJessica Paquette if (AttrFn(Attribute::Returned))
55bf36e902SJessica Paquette Flags.setReturned();
56bf36e902SJessica Paquette if (AttrFn(Attribute::SwiftSelf))
57bf36e902SJessica Paquette Flags.setSwiftSelf();
58ea0eec69STim Northover if (AttrFn(Attribute::SwiftAsync))
59ea0eec69STim Northover Flags.setSwiftAsync();
60bf36e902SJessica Paquette if (AttrFn(Attribute::SwiftError))
61bf36e902SJessica Paquette Flags.setSwiftError();
62bf36e902SJessica Paquette }
63bf36e902SJessica Paquette
getAttributesForArgIdx(const CallBase & Call,unsigned ArgIdx) const64224a8c63SJessica Paquette ISD::ArgFlagsTy CallLowering::getAttributesForArgIdx(const CallBase &Call,
65224a8c63SJessica Paquette unsigned ArgIdx) const {
66224a8c63SJessica Paquette ISD::ArgFlagsTy Flags;
67bf36e902SJessica Paquette addFlagsUsingAttrFn(Flags, [&Call, &ArgIdx](Attribute::AttrKind Attr) {
68bf36e902SJessica Paquette return Call.paramHasAttr(ArgIdx, Attr);
69bf36e902SJessica Paquette });
70224a8c63SJessica Paquette return Flags;
71224a8c63SJessica Paquette }
72224a8c63SJessica Paquette
addArgFlagsFromAttributes(ISD::ArgFlagsTy & Flags,const AttributeList & Attrs,unsigned OpIdx) const73bf36e902SJessica Paquette void CallLowering::addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
74bf36e902SJessica Paquette const AttributeList &Attrs,
75bf36e902SJessica Paquette unsigned OpIdx) const {
76bf36e902SJessica Paquette addFlagsUsingAttrFn(Flags, [&Attrs, &OpIdx](Attribute::AttrKind Attr) {
7752e6d70cSArthur Eubanks return Attrs.hasAttributeAtIndex(OpIdx, Attr);
78bf36e902SJessica Paquette });
79bf36e902SJessica Paquette }
80bf36e902SJessica Paquette
/// Translate the IR call \p CB into a CallLoweringInfo record and delegate to
/// the target's lowerCall(MIRBuilder, Info) hook.
///
/// \p ResRegs are the vregs receiving the call's results, \p ArgRegs the vreg
/// lists for each IR argument, and \p GetCalleeReg is invoked lazily to
/// materialize a register for indirect calls only.
/// \return false if the target failed to lower the call.
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  // A call may only be tail-called if the IR marked it, it sits in tail-call
  // position, and tail calls aren't disabled on the caller via attribute.
  // Subsequent checks below can still veto this.
  bool CanBeTailCalled = CB.isTailCall() &&
                         isInTailCallPosition(CB, MF.getTarget()) &&
                         (MF.getFunction()
                              .getFnAttribute("disable-tail-calls")
                              .getValueAsString() != "true");

  CallingConv::ID CallConv = CB.getCallingConv();
  Type *RetTy = CB.getType();
  bool IsVarArg = CB.getFunctionType()->isVarArg();

  // Check whether the callee's return value can come back in registers.
  SmallVector<BaseArgInfo, 4> SplitArgs;
  getReturnInfo(CallConv, RetTy, CB.getAttributes(), SplitArgs, DL);
  Info.CanLowerReturn = canLowerReturn(MF, CallConv, SplitArgs, IsVarArg);

  if (!Info.CanLowerReturn) {
    // Callee requires sret demotion.
    insertSRetOutgoingArgument(MIRBuilder, CB, Info);

    // The sret demotion isn't compatible with tail-calls, since the sret
    // argument points into the caller's stack frame.
    CanBeTailCalled = false;
  }


  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (const auto &Arg : CB.args()) {
    // Arguments beyond NumFixedArgs are the variadic tail.
    ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);

    // If we have an explicit sret argument that is an Instruction, (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (OrigArg.Flags[0].isSRet() && isa<Instruction>(&Arg))
      CanBeTailCalled = false;

    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    // Indirect call: only now do we force creation of a callee register.
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Register ReturnHintAlignReg;
  Align ReturnHintAlign;

  Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};

  if (!Info.OrigRet.Ty->isVoidTy()) {
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

    // If the call result carries an alignment hint, direct the call's result
    // into a fresh vreg so a G_ASSERT_ALIGN can be inserted afterwards.
    if (MaybeAlign Alignment = CB.getRetAlign()) {
      if (*Alignment > Align(1)) {
        ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
        Info.OrigRet.Regs[0] = ReturnHintAlignReg;
        ReturnHintAlign = *Alignment;
      }
    }
  }

  Info.CB = &CB;
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CallConv;
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall = CanBeTailCalled;
  Info.IsVarArg = IsVarArg;
  // Hand off to the target-specific implementation.
  if (!lowerCall(MIRBuilder, Info))
    return false;

  // Propagate the return-alignment hint (no instruction follows a tail call,
  // so skip it in that case).
  if (ReturnHintAlignReg && !Info.IsTailCall) {
    MIRBuilder.buildAssertAlign(ResRegs[0], ReturnHintAlignReg,
                                ReturnHintAlign);
  }

  return true;
}
1759a467183STim Northover
/// Populate \p Arg's flags from the attributes and layout information of the
/// call site or function \p FuncInfo.
///
/// \p OpIdx is an AttributeList index (i.e. offset by FirstArgIndex for
/// arguments, or ReturnIndex for the return value). Sets pointer metadata,
/// byval/inalloca/preallocated size, and the memory/original alignments.
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  addFlagsUsingAttrFn(Flags, Attrs, OpIdx);

  // Record pointer-ness and address space (scalar or vector-of-pointer).
  PointerType *PtrTy = dyn_cast<PointerType>(Arg.Ty->getScalarType());
  if (PtrTy) {
    Flags.setPointer();
    Flags.setPointerAddrSpace(PtrTy->getPointerAddressSpace());
  }

  Align MemAlign = DL.getABITypeAlign(Arg.Ty);
  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    assert(OpIdx >= AttributeList::FirstArgIndex);
    unsigned ParamIdx = OpIdx - AttributeList::FirstArgIndex;

    // The pointee type may be attached via byval, inalloca, or preallocated;
    // exactly one of them must be present here.
    Type *ElementTy = FuncInfo.getParamByValType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamInAllocaType(ParamIdx);
    if (!ElementTy)
      ElementTy = FuncInfo.getParamPreallocatedType(ParamIdx);
    assert(ElementTy && "Must have byval, inalloca or preallocated type");
    Flags.setByValSize(DL.getTypeAllocSize(ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    // Priority: explicit stack alignment > parameter alignment > target guess.
    if (auto ParamAlign = FuncInfo.getParamStackAlign(ParamIdx))
      MemAlign = *ParamAlign;
    else if ((ParamAlign = FuncInfo.getParamAlign(ParamIdx)))
      MemAlign = *ParamAlign;
    else
      MemAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
  } else if (OpIdx >= AttributeList::FirstArgIndex) {
    // Non-byval arguments may still carry an explicit stack alignment.
    if (auto ParamAlign =
            FuncInfo.getParamStackAlign(OpIdx - AttributeList::FirstArgIndex))
      MemAlign = *ParamAlign;
  }
  Flags.setMemAlign(MemAlign);
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

  // Don't try to use the returned attribute if the argument is marked as
  // swiftself, since it won't be passed in x0.
  if (Flags.isSwiftSelf())
    Flags.setReturned(false);
}
2249a467183STim Northover
// Explicit instantiations: function definitions (for formal arguments) and
// call sites (for actual arguments) are the only FuncInfoTy users.
template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;
234f11f042eSDiana Picus
/// Break \p OrigArg into one ArgInfo per legal value type, appending them to
/// \p SplitArgs.
///
/// Aggregates are decomposed via ComputeValueVTs; when \p Offsets is non-null
/// it receives the byte offset of each piece within the original value.
void CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                     SmallVectorImpl<ArgInfo> &SplitArgs,
                                     const DataLayout &DL,
                                     CallingConv::ID CallConv,
                                     SmallVectorImpl<uint64_t> *Offsets) const {
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(*TLI, DL, OrigArg.Ty, SplitVTs, Offsets, 0);

  // Zero-sized aggregates produce no pieces at all.
  if (SplitVTs.size() == 0)
    return;

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.OrigArgIndex, OrigArg.Flags[0],
                           OrigArg.IsFixed, OrigArg.OrigValue);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false, DL);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.OrigArgIndex,
                           OrigArg.Flags[0], OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  // NOTE(review): the "last" marker is set unconditionally here, even when
  // NeedsRegBlock is false — presumably harmless since consumers check
  // InConsecutiveRegs first; confirm against target CC handlers.
  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}
2726c260d3bSMatt Arsenault
/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
///
/// \p SrcRegs all share one part type; the destination type and the cover
/// type computed from both decide between a plain concat, a merge followed by
/// dropping trailing elements, or an unmerge with extra dead defs.
static MachineInstrBuilder
mergeVectorRegsToResultRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getCoverTy(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  // We need to create an unmerge to the result registers, which may require
  // widening the original value.
  Register UnmergeSrcReg;
  if (LCMTy != PartLLT) {
    // Merge the parts up to the cover type, then drop the padding elements to
    // reach the destination type.
    assert(DstRegs.size() == 1);
    return B.buildDeleteTrailingVectorElements(DstRegs[0],
                                               B.buildMerge(LCMTy, SrcRegs));
  } else {
    // We don't need to widen anything if we're extracting a scalar which was
    // promoted to a vector e.g. s8 -> v4s8 -> s8
    assert(SrcRegs.size() == 1);
    UnmergeSrcReg = SrcRegs[0];
  }

  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  if (PadDstRegs.size() == 1)
    return B.buildDeleteTrailingVectorElements(DstRegs[0], UnmergeSrcReg);
  return B.buildUnmerge(PadDstRegs, UnmergeSrcReg);
}
31662d946e1SMatt Arsenault
/// Create a sequence of instructions to combine pieces split into register
/// typed values to the original IR value. \p OrigRegs contains the destination
/// value registers of type \p LLTy, and \p Regs contains the legalized pieces
/// with type \p PartLLT. This is used for incoming values (physregs to vregs).
static void buildCopyFromRegs(MachineIRBuilder &B, ArrayRef<Register> OrigRegs,
                              ArrayRef<Register> Regs, LLT LLTy, LLT PartLLT,
                              const ISD::ArgFlagsTy Flags) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (PartLLT == LLTy) {
    // We should have avoided introducing a new virtual register, and just
    // directly assigned here.
    assert(OrigRegs[0] == Regs[0]);
    return;
  }

  // Same total size, single register on each side: a bitcast suffices.
  if (PartLLT.getSizeInBits() == LLTy.getSizeInBits() && OrigRegs.size() == 1 &&
      Regs.size() == 1) {
    B.buildBitcast(OrigRegs[0], Regs[0]);
    return;
  }

  // A vector PartLLT needs extending to LLTy's element size.
  // E.g. <2 x s64> = G_SEXT <2 x s32>.
  if (PartLLT.isVector() == LLTy.isVector() &&
      PartLLT.getScalarSizeInBits() > LLTy.getScalarSizeInBits() &&
      (!PartLLT.isVector() ||
       PartLLT.getNumElements() == LLTy.getNumElements()) &&
      OrigRegs.size() == 1 && Regs.size() == 1) {
    Register SrcReg = Regs[0];

    LLT LocTy = MRI.getType(SrcReg);

    // The ABI promised a sign/zero extension; record it with an assert so
    // later optimizations can rely on the known bits.
    if (Flags.isSExt()) {
      SrcReg = B.buildAssertSExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    } else if (Flags.isZExt()) {
      SrcReg = B.buildAssertZExt(LocTy, SrcReg, LLTy.getScalarSizeInBits())
                   .getReg(0);
    }

    // Sometimes pointers are passed zero extended.
    LLT OrigTy = MRI.getType(OrigRegs[0]);
    if (OrigTy.isPointer()) {
      LLT IntPtrTy = LLT::scalar(OrigTy.getSizeInBits());
      B.buildIntToPtr(OrigRegs[0], B.buildTrunc(IntPtrTy, SrcReg));
      return;
    }

    B.buildTrunc(OrigRegs[0], SrcReg);
    return;
  }

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    // Scalar built from several scalar parts: merge them, truncating if the
    // parts over-cover the destination.
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    SmallVector<Register> CastRegs(Regs.begin(), Regs.end());

    // If PartLLT is a mismatched vector in both number of elements and element
    // size, e.g. PartLLT == v2s64 and LLTy is v3s32, then first coerce it to
    // have the same elt type, i.e. v4s32.
    if (PartLLT.getSizeInBits() > LLTy.getSizeInBits() &&
        PartLLT.getScalarSizeInBits() == LLTy.getScalarSizeInBits() * 2 &&
        Regs.size() == 1) {
      LLT NewTy = PartLLT.changeElementType(LLTy.getElementType())
                      .changeElementCount(PartLLT.getElementCount() * 2);
      CastRegs[0] = B.buildBitcast(NewTy, Regs[0]).getReg(0);
      PartLLT = NewTy;
    }

    if (LLTy.getScalarType() == PartLLT.getElementType()) {
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    } else {
      unsigned I = 0;
      LLT GCDTy = getGCDType(LLTy, PartLLT);

      // We are both splitting a vector, and bitcasting its element types. Cast
      // the source pieces into the appropriate number of pieces with the result
      // element type.
      for (Register SrcReg : CastRegs)
        CastRegs[I++] = B.buildBitcast(GCDTy, SrcReg).getReg(0);
      mergeVectorRegsToResultRegs(B, OrigRegs, CastRegs);
    }

    return;
  }

  // Remaining case: vector destination rebuilt from scalar parts.
  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    // FIXME: Should handle floating point promotions.
    LLT BVType = LLT::fixed_vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}
46262d946e1SMatt Arsenault
/// Create a sequence of instructions to expand the value in \p SrcReg (of type
/// \p SrcTy) to the types in \p DstRegs (of type \p PartTy). \p ExtendOp should
/// contain the type of scalar value extension if necessary.
///
/// This is used for outgoing values (vregs to physregs)
static void buildCopyToRegs(MachineIRBuilder &B, ArrayRef<Register> DstRegs,
                            Register SrcReg, LLT SrcTy, LLT PartTy,
                            unsigned ExtendOp = TargetOpcode::G_ANYEXT) {
  // We could just insert a regular copy, but this is unreachable at the moment.
  assert(SrcTy != PartTy && "identical part types shouldn't reach here");

  const unsigned PartSize = PartTy.getSizeInBits();

  // Same vector-ness but wider part elements: one extension covers it.
  if (PartTy.isVector() == SrcTy.isVector() &&
      PartTy.getScalarSizeInBits() > SrcTy.getScalarSizeInBits()) {
    assert(DstRegs.size() == 1);
    B.buildInstr(ExtendOp, {DstRegs[0]}, {SrcReg});
    return;
  }

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  LLT GCDTy = getGCDType(SrcTy, PartTy);
  if (GCDTy == PartTy) {
    // If this already evenly divisible, we can create a simple unmerge.
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  MachineRegisterInfo &MRI = *B.getMRI();
  LLT DstTy = MRI.getType(DstRegs[0]);
  LLT LCMTy = getCoverTy(SrcTy, PartTy);

  // Vector cover type equal to the part type: pad with undef elements.
  if (PartTy.isVector() && LCMTy == PartTy) {
    assert(DstRegs.size() == 1);
    B.buildPadVectorWithUndefElements(DstRegs[0], SrcReg);
    return;
  }

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned CoveringSize = LCMTy.getSizeInBits();

  Register UnmergeSrc = SrcReg;

  // Widen the source up to the covering size before unmerging into parts.
  if (!LCMTy.isVector() && CoveringSize != SrcSize) {
    // For scalars, it's common to be able to use a simple extension.
    if (SrcTy.isScalar() && DstTy.isScalar()) {
      CoveringSize = alignTo(SrcSize, DstSize);
      LLT CoverTy = LLT::scalar(CoveringSize);
      UnmergeSrc = B.buildInstr(ExtendOp, {CoverTy}, {SrcReg}).getReg(0);
    } else {
      // Widen to the common type.
      // FIXME: This should respect the extend type
      Register Undef = B.buildUndef(SrcTy).getReg(0);
      SmallVector<Register, 8> MergeParts(1, SrcReg);
      for (unsigned Size = SrcSize; Size != CoveringSize; Size += SrcSize)
        MergeParts.push_back(Undef);
      UnmergeSrc = B.buildMerge(LCMTy, MergeParts).getReg(0);
    }
  }

  if (LCMTy.isVector() && CoveringSize != SrcSize)
    UnmergeSrc = B.buildPadVectorWithUndefElements(LCMTy, SrcReg).getReg(0);

  B.buildUnmerge(DstRegs, UnmergeSrc);
}
537fd82cbcfSMatt Arsenault
determineAndHandleAssignments(ValueHandler & Handler,ValueAssigner & Assigner,SmallVectorImpl<ArgInfo> & Args,MachineIRBuilder & MIRBuilder,CallingConv::ID CallConv,bool IsVarArg,ArrayRef<Register> ThisReturnRegs) const53824e2e5dfSMatt Arsenault bool CallLowering::determineAndHandleAssignments(
53924e2e5dfSMatt Arsenault ValueHandler &Handler, ValueAssigner &Assigner,
54024e2e5dfSMatt Arsenault SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
5410d1308a7SNikita Popov CallingConv::ID CallConv, bool IsVarArg,
5420d1308a7SNikita Popov ArrayRef<Register> ThisReturnRegs) const {
543f11f042eSDiana Picus MachineFunction &MF = MIRBuilder.getMF();
544f1caa283SMatthias Braun const Function &F = MF.getFunction();
545f11f042eSDiana Picus SmallVector<CCValAssign, 16> ArgLocs;
546b72a2365SMatt Arsenault
547b72a2365SMatt Arsenault CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, F.getContext());
54824e2e5dfSMatt Arsenault if (!determineAssignments(Assigner, Args, CCInfo))
54924e2e5dfSMatt Arsenault return false;
55024e2e5dfSMatt Arsenault
55124e2e5dfSMatt Arsenault return handleAssignments(Handler, Args, CCInfo, ArgLocs, MIRBuilder,
5520d1308a7SNikita Popov ThisReturnRegs);
5531c3f4ec7SMatt Arsenault }
5541c3f4ec7SMatt Arsenault
extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags)555fd82cbcfSMatt Arsenault static unsigned extendOpFromFlags(llvm::ISD::ArgFlagsTy Flags) {
556fd82cbcfSMatt Arsenault if (Flags.isSExt())
557fd82cbcfSMatt Arsenault return TargetOpcode::G_SEXT;
558fd82cbcfSMatt Arsenault if (Flags.isZExt())
559fd82cbcfSMatt Arsenault return TargetOpcode::G_ZEXT;
560fd82cbcfSMatt Arsenault return TargetOpcode::G_ANYEXT;
561fd82cbcfSMatt Arsenault }
562fd82cbcfSMatt Arsenault
/// Ask \p Assigner to allocate a location for every argument in \p Args,
/// breaking each argument into the register-sized parts the target's calling
/// convention dictates (via getRegisterTypeForCallingConv /
/// getNumRegistersForCallingConv). On the multi-part path this rewrites
/// Args[i].Flags to carry per-part split markers for later re-assembly.
///
/// \return false as soon as any (part of an) argument cannot be assigned.
bool CallLowering::determineAssignments(ValueAssigner &Assigner,
                                        SmallVectorImpl<ArgInfo> &Args,
                                        CCState &CCInfo) const {
  LLVMContext &Ctx = CCInfo.getContext();
  const CallingConv::ID CallConv = CCInfo.getCallingConv();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);

    // The legal register type the target wants this value passed as.
    MVT NewVT = TLI->getRegisterTypeForCallingConv(Ctx, CallConv, CurVT);

    // If we need to split the type over multiple regs, check it's a scenario
    // we currently support.
    unsigned NumParts =
        TLI->getNumRegistersForCallingConv(Ctx, CallConv, CurVT);

    if (NumParts == 1) {
      // Try to use the register type if we couldn't assign the VT.
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[0], CCInfo))
        return false;
      continue;
    }

    // For incoming arguments (physregs to vregs), we could have values in
    // physregs (or memlocs) which we want to extract and copy to vregs.
    // During this, we might have to deal with the LLT being split across
    // multiple regs, so we have to record this information for later.
    //
    // If we have outgoing args, then we have the opposite case. We have a
    // vreg with an LLT which we want to assign to a physical location, and
    // we might have to record that the value has to be split later.

    // We're handling an incoming arg which is split over multiple regs.
    // E.g. passing an s128 on AArch64.
    ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
    // Replace the single flag set with one flag set per part; each part
    // inherits the original flags plus split markers.
    Args[i].Flags.clear();

    for (unsigned Part = 0; Part < NumParts; ++Part) {
      ISD::ArgFlagsTy Flags = OrigFlags;
      if (Part == 0) {
        // First part carries the "split begins here" marker.
        Flags.setSplit();
      } else {
        // Non-first parts don't carry the original alignment requirement.
        Flags.setOrigAlign(Align(1));
        if (Part == NumParts - 1)
          Flags.setSplitEnd();
      }

      Args[i].Flags.push_back(Flags);
      if (Assigner.assignArg(i, CurVT, NewVT, NewVT, CCValAssign::Full, Args[i],
                             Args[i].Flags[Part], CCInfo)) {
        // Still couldn't assign this smaller part type for some reason.
        return false;
      }
    }
  }

  return true;
}
62324e2e5dfSMatt Arsenault
/// Materialize the argument locations previously computed into \p ArgLocs:
/// for each argument, create any intermediate part vregs, coerce values to
/// their assigned types, and dispatch each part to \p Handler as a register
/// copy, a stack access, or a byval memory copy. Outgoing physreg copies are
/// deferred (see DelayedOutgoingRegAssignments below) and emitted at the end.
///
/// \return false if a custom-location argument could not be handled.
bool CallLowering::handleAssignments(ValueHandler &Handler,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     ArrayRef<Register> ThisReturnRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  const unsigned NumArgs = Args.size();

  // Stores thunks for outgoing register assignments. This is used so we delay
  // generating register copies until mem loc assignments are done. We do this
  // so that if the target is using the delayed stack protector feature, we can
  // find the split point of the block accurately. E.g. if we have:
  // G_STORE %val, %memloc
  // $x0 = COPY %foo
  // $x1 = COPY %bar
  // CALL func
  // ... then the split point for the block will correctly be at, and including,
  // the copy to $x0. If instead the G_STORE instruction immediately precedes
  // the CALL, then we'd prematurely choose the CALL as the split point, thus
  // generating a split block with a CALL that uses undefined physregs.
  SmallVector<std::function<void()>> DelayedOutgoingRegAssignments;

  // i indexes the original arguments; j indexes ArgLocs, which may contain
  // several consecutive locations per argument (one per split part).
  for (unsigned i = 0, j = 0; i != NumArgs; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");
    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      // The target handles this argument itself; it reports how many
      // locations it consumed and may hand back a deferred physreg-copy thunk.
      std::function<void()> Thunk;
      unsigned NumArgRegs = Handler.assignCustomValue(
          Args[i], makeArrayRef(ArgLocs).slice(j), &Thunk);
      if (Thunk)
        DelayedOutgoingRegAssignments.emplace_back(Thunk);
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    const MVT ValVT = VA.getValVT();
    const MVT LocVT = VA.getLocVT();

    const LLT LocTy(LocVT);
    const LLT ValTy(ValVT);
    // Incoming values arrive in the (possibly extended) location type;
    // outgoing values are produced in the value type.
    const LLT NewLLT = Handler.isIncomingArgumentHandler() ? LocTy : ValTy;
    const EVT OrigVT = EVT::getEVT(Args[i].Ty);
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    // Expected to be multiple regs for a single incoming arg.
    // There should be Regs.size() ArgLocs per argument.
    // This should be the same as getNumRegistersForCallingConv
    const unsigned NumParts = Args[i].Flags.size();

    // Now split the registers into the assigned types.
    Args[i].OrigRegs.assign(Args[i].Regs.begin(), Args[i].Regs.end());

    if (NumParts != 1 || NewLLT != OrigTy) {
      // If we can't directly assign the register, we need one or more
      // intermediate values.
      Args[i].Regs.resize(NumParts);

      // For each split register, create and assign a vreg that will store
      // the incoming component of the larger value. These will later be
      // merged to form the final vreg.
      for (unsigned Part = 0; Part < NumParts; ++Part)
        Args[i].Regs[Part] = MRI.createGenericVirtualRegister(NewLLT);
    }

    assert((j + (NumParts - 1)) < ArgLocs.size() &&
           "Too many regs for number of args");

    // Coerce into outgoing value types before register assignment.
    if (!Handler.isIncomingArgumentHandler() && OrigTy != ValTy) {
      assert(Args[i].OrigRegs.size() == 1);
      buildCopyToRegs(MIRBuilder, Args[i].Regs, Args[i].OrigRegs[0], OrigTy,
                      ValTy, extendOpFromFlags(Args[i].Flags[0]));
    }

    bool BigEndianPartOrdering = TLI->hasBigEndianPartOrdering(OrigVT, DL);
    for (unsigned Part = 0; Part < NumParts; ++Part) {
      Register ArgReg = Args[i].Regs[Part];
      // There should be Regs.size() ArgLocs per argument.
      unsigned Idx = BigEndianPartOrdering ? NumParts - 1 - Part : Part;
      // Note: this VA intentionally shadows the outer per-argument VA with
      // the per-part location.
      CCValAssign &VA = ArgLocs[j + Idx];
      const ISD::ArgFlagsTy Flags = Args[i].Flags[Part];

      if (VA.isMemLoc() && !Flags.isByVal()) {
        // Individual pieces may have been spilled to the stack and others
        // passed in registers.

        // TODO: The memory size may be larger than the value we need to
        // store. We may need to adjust the offset for big endian targets.
        LLT MemTy = Handler.getStackValueStoreType(DL, VA, Flags);

        MachinePointerInfo MPO;
        Register StackAddr = Handler.getStackAddress(
            MemTy.getSizeInBytes(), VA.getLocMemOffset(), MPO, Flags);

        Handler.assignValueToAddress(Args[i], Part, StackAddr, MemTy, MPO, VA);
        continue;
      }

      if (VA.isMemLoc() && Flags.isByVal()) {
        assert(Args[i].Regs.size() == 1 &&
               "didn't expect split byval pointer");

        if (Handler.isIncomingArgumentHandler()) {
          // We just need to copy the frame index value to the pointer.
          MachinePointerInfo MPO;
          Register StackAddr = Handler.getStackAddress(
              Flags.getByValSize(), VA.getLocMemOffset(), MPO, Flags);
          MIRBuilder.buildCopy(Args[i].Regs[0], StackAddr);
        } else {
          // For outgoing byval arguments, insert the implicit copy byval
          // implies, such that writes in the callee do not modify the caller's
          // value.
          uint64_t MemSize = Flags.getByValSize();
          int64_t Offset = VA.getLocMemOffset();

          MachinePointerInfo DstMPO;
          Register StackAddr =
              Handler.getStackAddress(MemSize, Offset, DstMPO, Flags);

          MachinePointerInfo SrcMPO(Args[i].OrigValue);
          if (!Args[i].OrigValue) {
            // We still need to accurately track the stack address space if we
            // don't know the underlying value.
            const LLT PtrTy = MRI.getType(StackAddr);
            SrcMPO = MachinePointerInfo(PtrTy.getAddressSpace());
          }

          Align DstAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, DstMPO));

          Align SrcAlign = std::max(Flags.getNonZeroByValAlign(),
                                    inferAlignFromPtrInfo(MF, SrcMPO));

          Handler.copyArgumentMemory(Args[i], StackAddr, Args[i].Regs[0],
                                     DstMPO, DstAlign, SrcMPO, SrcAlign,
                                     MemSize, VA);
        }
        continue;
      }

      assert(!VA.needsCustom() && "custom loc should have been handled already");

      // 'this'-return optimization: route the first incoming value through
      // the registers the caller designated for the returned 'this' pointer.
      if (i == 0 && !ThisReturnRegs.empty() &&
          Handler.isIncomingArgumentHandler() &&
          isTypeIsValidForThisReturn(ValVT)) {
        Handler.assignValueToReg(ArgReg, ThisReturnRegs[Part], VA);
        continue;
      }

      if (Handler.isIncomingArgumentHandler())
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      else {
        // Defer outgoing physreg copies; see DelayedOutgoingRegAssignments.
        DelayedOutgoingRegAssignments.emplace_back([=, &Handler]() {
          Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
        });
      }
    }

    // Now that all pieces have been assigned, re-pack the register typed values
    // into the original value typed registers.
    if (Handler.isIncomingArgumentHandler() && OrigVT != LocVT) {
      // Merge the split registers into the expected larger result vregs of
      // the original call.
      buildCopyFromRegs(MIRBuilder, Args[i].OrigRegs, Args[i].Regs, OrigTy,
                        LocTy, Args[i].Flags[0]);
    }

    j += NumParts - 1;
  }
  // All memory locations are done; it is now safe to emit the deferred
  // physreg copies.
  for (auto &Fn : DelayedOutgoingRegAssignments)
    Fn();

  return true;
}
8072d9adbf5SDiana Picus
insertSRetLoads(MachineIRBuilder & MIRBuilder,Type * RetTy,ArrayRef<Register> VRegs,Register DemoteReg,int FI) const808d68458bdSChristudasan Devadasan void CallLowering::insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
809d68458bdSChristudasan Devadasan ArrayRef<Register> VRegs, Register DemoteReg,
810d68458bdSChristudasan Devadasan int FI) const {
811d68458bdSChristudasan Devadasan MachineFunction &MF = MIRBuilder.getMF();
812d68458bdSChristudasan Devadasan MachineRegisterInfo &MRI = MF.getRegInfo();
813d68458bdSChristudasan Devadasan const DataLayout &DL = MF.getDataLayout();
814d68458bdSChristudasan Devadasan
815d68458bdSChristudasan Devadasan SmallVector<EVT, 4> SplitVTs;
816d68458bdSChristudasan Devadasan SmallVector<uint64_t, 4> Offsets;
817d68458bdSChristudasan Devadasan ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
818d68458bdSChristudasan Devadasan
819d68458bdSChristudasan Devadasan assert(VRegs.size() == SplitVTs.size());
820d68458bdSChristudasan Devadasan
821d68458bdSChristudasan Devadasan unsigned NumValues = SplitVTs.size();
822d68458bdSChristudasan Devadasan Align BaseAlign = DL.getPrefTypeAlign(RetTy);
823d68458bdSChristudasan Devadasan Type *RetPtrTy = RetTy->getPointerTo(DL.getAllocaAddrSpace());
824d68458bdSChristudasan Devadasan LLT OffsetLLTy = getLLTForType(*DL.getIntPtrType(RetPtrTy), DL);
825d68458bdSChristudasan Devadasan
826d68458bdSChristudasan Devadasan MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
827d68458bdSChristudasan Devadasan
828d68458bdSChristudasan Devadasan for (unsigned I = 0; I < NumValues; ++I) {
829d68458bdSChristudasan Devadasan Register Addr;
830d68458bdSChristudasan Devadasan MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
831d68458bdSChristudasan Devadasan auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
83267d61324SMatt Arsenault MRI.getType(VRegs[I]),
833d68458bdSChristudasan Devadasan commonAlignment(BaseAlign, Offsets[I]));
834d68458bdSChristudasan Devadasan MIRBuilder.buildLoad(VRegs[I], Addr, *MMO);
835d68458bdSChristudasan Devadasan }
836d68458bdSChristudasan Devadasan }
837d68458bdSChristudasan Devadasan
insertSRetStores(MachineIRBuilder & MIRBuilder,Type * RetTy,ArrayRef<Register> VRegs,Register DemoteReg) const838d68458bdSChristudasan Devadasan void CallLowering::insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
839d68458bdSChristudasan Devadasan ArrayRef<Register> VRegs,
840d68458bdSChristudasan Devadasan Register DemoteReg) const {
841d68458bdSChristudasan Devadasan MachineFunction &MF = MIRBuilder.getMF();
842d68458bdSChristudasan Devadasan MachineRegisterInfo &MRI = MF.getRegInfo();
843d68458bdSChristudasan Devadasan const DataLayout &DL = MF.getDataLayout();
844d68458bdSChristudasan Devadasan
845d68458bdSChristudasan Devadasan SmallVector<EVT, 4> SplitVTs;
846d68458bdSChristudasan Devadasan SmallVector<uint64_t, 4> Offsets;
847d68458bdSChristudasan Devadasan ComputeValueVTs(*TLI, DL, RetTy, SplitVTs, &Offsets, 0);
848d68458bdSChristudasan Devadasan
849d68458bdSChristudasan Devadasan assert(VRegs.size() == SplitVTs.size());
850d68458bdSChristudasan Devadasan
851d68458bdSChristudasan Devadasan unsigned NumValues = SplitVTs.size();
852d68458bdSChristudasan Devadasan Align BaseAlign = DL.getPrefTypeAlign(RetTy);
853d68458bdSChristudasan Devadasan unsigned AS = DL.getAllocaAddrSpace();
854d68458bdSChristudasan Devadasan LLT OffsetLLTy =
855d68458bdSChristudasan Devadasan getLLTForType(*DL.getIntPtrType(RetTy->getPointerTo(AS)), DL);
856d68458bdSChristudasan Devadasan
857d68458bdSChristudasan Devadasan MachinePointerInfo PtrInfo(AS);
858d68458bdSChristudasan Devadasan
859d68458bdSChristudasan Devadasan for (unsigned I = 0; I < NumValues; ++I) {
860d68458bdSChristudasan Devadasan Register Addr;
861d68458bdSChristudasan Devadasan MIRBuilder.materializePtrAdd(Addr, DemoteReg, OffsetLLTy, Offsets[I]);
862d68458bdSChristudasan Devadasan auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
86367d61324SMatt Arsenault MRI.getType(VRegs[I]),
864d68458bdSChristudasan Devadasan commonAlignment(BaseAlign, Offsets[I]));
865d68458bdSChristudasan Devadasan MIRBuilder.buildStore(VRegs[I], Addr, *MMO);
866d68458bdSChristudasan Devadasan }
867d68458bdSChristudasan Devadasan }
868d68458bdSChristudasan Devadasan
insertSRetIncomingArgument(const Function & F,SmallVectorImpl<ArgInfo> & SplitArgs,Register & DemoteReg,MachineRegisterInfo & MRI,const DataLayout & DL) const869d68458bdSChristudasan Devadasan void CallLowering::insertSRetIncomingArgument(
870d68458bdSChristudasan Devadasan const Function &F, SmallVectorImpl<ArgInfo> &SplitArgs, Register &DemoteReg,
871d68458bdSChristudasan Devadasan MachineRegisterInfo &MRI, const DataLayout &DL) const {
872d68458bdSChristudasan Devadasan unsigned AS = DL.getAllocaAddrSpace();
873d68458bdSChristudasan Devadasan DemoteReg = MRI.createGenericVirtualRegister(
874d68458bdSChristudasan Devadasan LLT::pointer(AS, DL.getPointerSizeInBits(AS)));
875d68458bdSChristudasan Devadasan
876d68458bdSChristudasan Devadasan Type *PtrTy = PointerType::get(F.getReturnType(), AS);
877d68458bdSChristudasan Devadasan
878d68458bdSChristudasan Devadasan SmallVector<EVT, 1> ValueVTs;
879d68458bdSChristudasan Devadasan ComputeValueVTs(*TLI, DL, PtrTy, ValueVTs);
880d68458bdSChristudasan Devadasan
881d68458bdSChristudasan Devadasan // NOTE: Assume that a pointer won't get split into more than one VT.
882d68458bdSChristudasan Devadasan assert(ValueVTs.size() == 1);
883d68458bdSChristudasan Devadasan
8849b057f64SMatt Arsenault ArgInfo DemoteArg(DemoteReg, ValueVTs[0].getTypeForEVT(PtrTy->getContext()),
8859b057f64SMatt Arsenault ArgInfo::NoArgIndex);
886d68458bdSChristudasan Devadasan setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, F);
887d68458bdSChristudasan Devadasan DemoteArg.Flags[0].setSRet();
888d68458bdSChristudasan Devadasan SplitArgs.insert(SplitArgs.begin(), DemoteArg);
889d68458bdSChristudasan Devadasan }
890d68458bdSChristudasan Devadasan
insertSRetOutgoingArgument(MachineIRBuilder & MIRBuilder,const CallBase & CB,CallLoweringInfo & Info) const891d68458bdSChristudasan Devadasan void CallLowering::insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
892d68458bdSChristudasan Devadasan const CallBase &CB,
893d68458bdSChristudasan Devadasan CallLoweringInfo &Info) const {
894d68458bdSChristudasan Devadasan const DataLayout &DL = MIRBuilder.getDataLayout();
895d68458bdSChristudasan Devadasan Type *RetTy = CB.getType();
896d68458bdSChristudasan Devadasan unsigned AS = DL.getAllocaAddrSpace();
897d68458bdSChristudasan Devadasan LLT FramePtrTy = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
898d68458bdSChristudasan Devadasan
899d68458bdSChristudasan Devadasan int FI = MIRBuilder.getMF().getFrameInfo().CreateStackObject(
900d68458bdSChristudasan Devadasan DL.getTypeAllocSize(RetTy), DL.getPrefTypeAlign(RetTy), false);
901d68458bdSChristudasan Devadasan
902d68458bdSChristudasan Devadasan Register DemoteReg = MIRBuilder.buildFrameIndex(FramePtrTy, FI).getReg(0);
9039b057f64SMatt Arsenault ArgInfo DemoteArg(DemoteReg, PointerType::get(RetTy, AS),
9049b057f64SMatt Arsenault ArgInfo::NoArgIndex);
905d68458bdSChristudasan Devadasan setArgFlags(DemoteArg, AttributeList::ReturnIndex, DL, CB);
906d68458bdSChristudasan Devadasan DemoteArg.Flags[0].setSRet();
907d68458bdSChristudasan Devadasan
908d68458bdSChristudasan Devadasan Info.OrigArgs.insert(Info.OrigArgs.begin(), DemoteArg);
909d68458bdSChristudasan Devadasan Info.DemoteStackIndex = FI;
910d68458bdSChristudasan Devadasan Info.DemoteRegister = DemoteReg;
911d68458bdSChristudasan Devadasan }
912d68458bdSChristudasan Devadasan
checkReturn(CCState & CCInfo,SmallVectorImpl<BaseArgInfo> & Outs,CCAssignFn * Fn) const913d68458bdSChristudasan Devadasan bool CallLowering::checkReturn(CCState &CCInfo,
914d68458bdSChristudasan Devadasan SmallVectorImpl<BaseArgInfo> &Outs,
915d68458bdSChristudasan Devadasan CCAssignFn *Fn) const {
916d68458bdSChristudasan Devadasan for (unsigned I = 0, E = Outs.size(); I < E; ++I) {
917d68458bdSChristudasan Devadasan MVT VT = MVT::getVT(Outs[I].Ty);
918d68458bdSChristudasan Devadasan if (Fn(I, VT, VT, CCValAssign::Full, Outs[I].Flags[0], CCInfo))
919d68458bdSChristudasan Devadasan return false;
920d68458bdSChristudasan Devadasan }
921d68458bdSChristudasan Devadasan return true;
922d68458bdSChristudasan Devadasan }
923d68458bdSChristudasan Devadasan
getReturnInfo(CallingConv::ID CallConv,Type * RetTy,AttributeList Attrs,SmallVectorImpl<BaseArgInfo> & Outs,const DataLayout & DL) const924d68458bdSChristudasan Devadasan void CallLowering::getReturnInfo(CallingConv::ID CallConv, Type *RetTy,
925d68458bdSChristudasan Devadasan AttributeList Attrs,
926d68458bdSChristudasan Devadasan SmallVectorImpl<BaseArgInfo> &Outs,
927d68458bdSChristudasan Devadasan const DataLayout &DL) const {
928d68458bdSChristudasan Devadasan LLVMContext &Context = RetTy->getContext();
929d68458bdSChristudasan Devadasan ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
930d68458bdSChristudasan Devadasan
931d68458bdSChristudasan Devadasan SmallVector<EVT, 4> SplitVTs;
932d68458bdSChristudasan Devadasan ComputeValueVTs(*TLI, DL, RetTy, SplitVTs);
933d68458bdSChristudasan Devadasan addArgFlagsFromAttributes(Flags, Attrs, AttributeList::ReturnIndex);
934d68458bdSChristudasan Devadasan
935d68458bdSChristudasan Devadasan for (EVT VT : SplitVTs) {
936d68458bdSChristudasan Devadasan unsigned NumParts =
937d68458bdSChristudasan Devadasan TLI->getNumRegistersForCallingConv(Context, CallConv, VT);
938d68458bdSChristudasan Devadasan MVT RegVT = TLI->getRegisterTypeForCallingConv(Context, CallConv, VT);
939d68458bdSChristudasan Devadasan Type *PartTy = EVT(RegVT).getTypeForEVT(Context);
940d68458bdSChristudasan Devadasan
941d68458bdSChristudasan Devadasan for (unsigned I = 0; I < NumParts; ++I) {
942d68458bdSChristudasan Devadasan Outs.emplace_back(PartTy, Flags);
943d68458bdSChristudasan Devadasan }
944d68458bdSChristudasan Devadasan }
945d68458bdSChristudasan Devadasan }
946d68458bdSChristudasan Devadasan
checkReturnTypeForCallConv(MachineFunction & MF) const947d68458bdSChristudasan Devadasan bool CallLowering::checkReturnTypeForCallConv(MachineFunction &MF) const {
948d68458bdSChristudasan Devadasan const auto &F = MF.getFunction();
949d68458bdSChristudasan Devadasan Type *ReturnType = F.getReturnType();
950d68458bdSChristudasan Devadasan CallingConv::ID CallConv = F.getCallingConv();
951d68458bdSChristudasan Devadasan
952d68458bdSChristudasan Devadasan SmallVector<BaseArgInfo, 4> SplitArgs;
953d68458bdSChristudasan Devadasan getReturnInfo(CallConv, ReturnType, F.getAttributes(), SplitArgs,
954d68458bdSChristudasan Devadasan MF.getDataLayout());
955ae25a397SChristudasan Devadasan return canLowerReturn(MF, CallConv, SplitArgs, F.isVarArg());
956d68458bdSChristudasan Devadasan }
957d68458bdSChristudasan Devadasan
/// Check that every outgoing argument assigned to a callee-saved register is
/// simply a copy of that same register's incoming value. If an argument lands
/// in a CSR but was produced any other way, the call cannot be tail-called
/// (the debug output below documents each rejection reason).
///
/// \p OutLocs and \p OutArgs are parallel: OutLocs[i] is the assigned
/// location of OutArgs[i].
bool CallLowering::parametersInCSRMatch(
    const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &OutLocs,
    const SmallVectorImpl<ArgInfo> &OutArgs) const {
  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    const auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc())
      continue;

    MCRegister PhysReg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, PhysReg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    const ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != PhysReg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  // Every CSR-assigned argument is a pass-through copy of that CSR.
  return true;
}
100935c535a7SMatt Arsenault
resultsCompatible(CallLoweringInfo & Info,MachineFunction & MF,SmallVectorImpl<ArgInfo> & InArgs,ValueAssigner & CalleeAssigner,ValueAssigner & CallerAssigner) const10102af5b193SJessica Paquette bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
10112af5b193SJessica Paquette MachineFunction &MF,
10122af5b193SJessica Paquette SmallVectorImpl<ArgInfo> &InArgs,
101324e2e5dfSMatt Arsenault ValueAssigner &CalleeAssigner,
101424e2e5dfSMatt Arsenault ValueAssigner &CallerAssigner) const {
10152af5b193SJessica Paquette const Function &F = MF.getFunction();
10162af5b193SJessica Paquette CallingConv::ID CalleeCC = Info.CallConv;
10172af5b193SJessica Paquette CallingConv::ID CallerCC = F.getCallingConv();
10182af5b193SJessica Paquette
10192af5b193SJessica Paquette if (CallerCC == CalleeCC)
10202af5b193SJessica Paquette return true;
10212af5b193SJessica Paquette
10222af5b193SJessica Paquette SmallVector<CCValAssign, 16> ArgLocs1;
10236f5ddf67SMatt Arsenault CCState CCInfo1(CalleeCC, Info.IsVarArg, MF, ArgLocs1, F.getContext());
102424e2e5dfSMatt Arsenault if (!determineAssignments(CalleeAssigner, InArgs, CCInfo1))
10252af5b193SJessica Paquette return false;
10262af5b193SJessica Paquette
10272af5b193SJessica Paquette SmallVector<CCValAssign, 16> ArgLocs2;
10286f5ddf67SMatt Arsenault CCState CCInfo2(CallerCC, F.isVarArg(), MF, ArgLocs2, F.getContext());
102924e2e5dfSMatt Arsenault if (!determineAssignments(CallerAssigner, InArgs, CCInfo2))
10302af5b193SJessica Paquette return false;
10312af5b193SJessica Paquette
10322af5b193SJessica Paquette // We need the argument locations to match up exactly. If there's more in
10332af5b193SJessica Paquette // one than the other, then we are done.
10342af5b193SJessica Paquette if (ArgLocs1.size() != ArgLocs2.size())
10352af5b193SJessica Paquette return false;
10362af5b193SJessica Paquette
10372af5b193SJessica Paquette // Make sure that each location is passed in exactly the same way.
10382af5b193SJessica Paquette for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
10392af5b193SJessica Paquette const CCValAssign &Loc1 = ArgLocs1[i];
10402af5b193SJessica Paquette const CCValAssign &Loc2 = ArgLocs2[i];
10412af5b193SJessica Paquette
10422af5b193SJessica Paquette // We need both of them to be the same. So if one is a register and one
10432af5b193SJessica Paquette // isn't, we're done.
10442af5b193SJessica Paquette if (Loc1.isRegLoc() != Loc2.isRegLoc())
10452af5b193SJessica Paquette return false;
10462af5b193SJessica Paquette
10472af5b193SJessica Paquette if (Loc1.isRegLoc()) {
10482af5b193SJessica Paquette // If they don't have the same register location, we're done.
10492af5b193SJessica Paquette if (Loc1.getLocReg() != Loc2.getLocReg())
10502af5b193SJessica Paquette return false;
10512af5b193SJessica Paquette
10522af5b193SJessica Paquette // They matched, so we can move to the next ArgLoc.
10532af5b193SJessica Paquette continue;
10542af5b193SJessica Paquette }
10552af5b193SJessica Paquette
10562af5b193SJessica Paquette // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
10572af5b193SJessica Paquette if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
10582af5b193SJessica Paquette return false;
10592af5b193SJessica Paquette }
10602af5b193SJessica Paquette
10612af5b193SJessica Paquette return true;
10622af5b193SJessica Paquette }
10632af5b193SJessica Paquette
getStackValueStoreType(const DataLayout & DL,const CCValAssign & VA,ISD::ArgFlagsTy Flags) const106499c7e918SMatt Arsenault LLT CallLowering::ValueHandler::getStackValueStoreType(
1065e91da668SMatt Arsenault const DataLayout &DL, const CCValAssign &VA, ISD::ArgFlagsTy Flags) const {
106699c7e918SMatt Arsenault const MVT ValVT = VA.getValVT();
1067e91da668SMatt Arsenault if (ValVT != MVT::iPTR) {
1068e91da668SMatt Arsenault LLT ValTy(ValVT);
1069fa0b93b5SMatt Arsenault
1070e91da668SMatt Arsenault // We lost the pointeriness going through CCValAssign, so try to restore it
1071e91da668SMatt Arsenault // based on the flags.
1072e91da668SMatt Arsenault if (Flags.isPointer()) {
1073e91da668SMatt Arsenault LLT PtrTy = LLT::pointer(Flags.getPointerAddrSpace(),
1074e91da668SMatt Arsenault ValTy.getScalarSizeInBits());
1075e91da668SMatt Arsenault if (ValVT.isVector())
1076e91da668SMatt Arsenault return LLT::vector(ValTy.getElementCount(), PtrTy);
1077e91da668SMatt Arsenault return PtrTy;
1078e91da668SMatt Arsenault }
1079e91da668SMatt Arsenault
1080e91da668SMatt Arsenault return ValTy;
1081e91da668SMatt Arsenault }
1082e91da668SMatt Arsenault
1083e91da668SMatt Arsenault unsigned AddrSpace = Flags.getPointerAddrSpace();
1084e91da668SMatt Arsenault return LLT::pointer(AddrSpace, DL.getPointerSize(AddrSpace));
1085fa0b93b5SMatt Arsenault }
1086fa0b93b5SMatt Arsenault
copyArgumentMemory(const ArgInfo & Arg,Register DstPtr,Register SrcPtr,const MachinePointerInfo & DstPtrInfo,Align DstAlign,const MachinePointerInfo & SrcPtrInfo,Align SrcAlign,uint64_t MemSize,CCValAssign & VA) const108761f834ccSMatt Arsenault void CallLowering::ValueHandler::copyArgumentMemory(
108861f834ccSMatt Arsenault const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
108961f834ccSMatt Arsenault const MachinePointerInfo &DstPtrInfo, Align DstAlign,
109061f834ccSMatt Arsenault const MachinePointerInfo &SrcPtrInfo, Align SrcAlign, uint64_t MemSize,
109161f834ccSMatt Arsenault CCValAssign &VA) const {
109261f834ccSMatt Arsenault MachineFunction &MF = MIRBuilder.getMF();
109361f834ccSMatt Arsenault MachineMemOperand *SrcMMO = MF.getMachineMemOperand(
109461f834ccSMatt Arsenault SrcPtrInfo,
109561f834ccSMatt Arsenault MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable, MemSize,
109661f834ccSMatt Arsenault SrcAlign);
109761f834ccSMatt Arsenault
109861f834ccSMatt Arsenault MachineMemOperand *DstMMO = MF.getMachineMemOperand(
109961f834ccSMatt Arsenault DstPtrInfo,
110061f834ccSMatt Arsenault MachineMemOperand::MOStore | MachineMemOperand::MODereferenceable,
110161f834ccSMatt Arsenault MemSize, DstAlign);
110261f834ccSMatt Arsenault
110361f834ccSMatt Arsenault const LLT PtrTy = MRI.getType(DstPtr);
110461f834ccSMatt Arsenault const LLT SizeTy = LLT::scalar(PtrTy.getSizeInBits());
110561f834ccSMatt Arsenault
110661f834ccSMatt Arsenault auto SizeConst = MIRBuilder.buildConstant(SizeTy, MemSize);
110761f834ccSMatt Arsenault MIRBuilder.buildMemCpy(DstPtr, SrcPtr, SizeConst, *DstMMO, *SrcMMO);
110861f834ccSMatt Arsenault }
110961f834ccSMatt Arsenault
extendRegister(Register ValReg,CCValAssign & VA,unsigned MaxSizeBits)1110faeaedf8SMatt Arsenault Register CallLowering::ValueHandler::extendRegister(Register ValReg,
1111dbb03567SAmara Emerson CCValAssign &VA,
1112dbb03567SAmara Emerson unsigned MaxSizeBits) {
11132d9adbf5SDiana Picus LLT LocTy{VA.getLocVT()};
1114fa0b93b5SMatt Arsenault LLT ValTy{VA.getValVT()};
1115fa0b93b5SMatt Arsenault
1116dbb03567SAmara Emerson if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
11172b523f81SAmara Emerson return ValReg;
1118dbb03567SAmara Emerson
1119dbb03567SAmara Emerson if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
1120dbb03567SAmara Emerson if (MaxSizeBits <= ValTy.getSizeInBits())
1121dbb03567SAmara Emerson return ValReg;
1122dbb03567SAmara Emerson LocTy = LLT::scalar(MaxSizeBits);
1123dbb03567SAmara Emerson }
1124dbb03567SAmara Emerson
1125e91da668SMatt Arsenault const LLT ValRegTy = MRI.getType(ValReg);
1126e91da668SMatt Arsenault if (ValRegTy.isPointer()) {
1127e91da668SMatt Arsenault // The x32 ABI wants to zero extend 32-bit pointers to 64-bit registers, so
1128e91da668SMatt Arsenault // we have to cast to do the extension.
1129e91da668SMatt Arsenault LLT IntPtrTy = LLT::scalar(ValRegTy.getSizeInBits());
1130e91da668SMatt Arsenault ValReg = MIRBuilder.buildPtrToInt(IntPtrTy, ValReg).getReg(0);
1131e91da668SMatt Arsenault }
1132e91da668SMatt Arsenault
11332d9adbf5SDiana Picus switch (VA.getLocInfo()) {
11342d9adbf5SDiana Picus default: break;
11352d9adbf5SDiana Picus case CCValAssign::Full:
11362d9adbf5SDiana Picus case CCValAssign::BCvt:
11372d9adbf5SDiana Picus // FIXME: bitconverting between vector types may or may not be a
11382d9adbf5SDiana Picus // nop in big-endian situations.
11392d9adbf5SDiana Picus return ValReg;
1140c3bfc81aSAditya Nandakumar case CCValAssign::AExt: {
1141c3bfc81aSAditya Nandakumar auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
1142b482e1bfSJay Foad return MIB.getReg(0);
1143c3bfc81aSAditya Nandakumar }
11442d9adbf5SDiana Picus case CCValAssign::SExt: {
11457e71902bSMatt Arsenault Register NewReg = MRI.createGenericVirtualRegister(LocTy);
11462d9adbf5SDiana Picus MIRBuilder.buildSExt(NewReg, ValReg);
11472d9adbf5SDiana Picus return NewReg;
11482d9adbf5SDiana Picus }
11492d9adbf5SDiana Picus case CCValAssign::ZExt: {
11507e71902bSMatt Arsenault Register NewReg = MRI.createGenericVirtualRegister(LocTy);
11512d9adbf5SDiana Picus MIRBuilder.buildZExt(NewReg, ValReg);
11522d9adbf5SDiana Picus return NewReg;
11532d9adbf5SDiana Picus }
11542d9adbf5SDiana Picus }
11552d9adbf5SDiana Picus llvm_unreachable("unable to extend register");
11562d9adbf5SDiana Picus }
1157a87b70d1SRichard Trieu
anchor()115824e2e5dfSMatt Arsenault void CallLowering::ValueAssigner::anchor() {}
115978dcff48SMatt Arsenault
buildExtensionHint(CCValAssign & VA,Register SrcReg,LLT NarrowTy)116078dcff48SMatt Arsenault Register CallLowering::IncomingValueHandler::buildExtensionHint(CCValAssign &VA,
116178dcff48SMatt Arsenault Register SrcReg,
116278dcff48SMatt Arsenault LLT NarrowTy) {
116378dcff48SMatt Arsenault switch (VA.getLocInfo()) {
116478dcff48SMatt Arsenault case CCValAssign::LocInfo::ZExt: {
116578dcff48SMatt Arsenault return MIRBuilder
116678dcff48SMatt Arsenault .buildAssertZExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
116778dcff48SMatt Arsenault NarrowTy.getScalarSizeInBits())
116878dcff48SMatt Arsenault .getReg(0);
116978dcff48SMatt Arsenault }
117078dcff48SMatt Arsenault case CCValAssign::LocInfo::SExt: {
117178dcff48SMatt Arsenault return MIRBuilder
117278dcff48SMatt Arsenault .buildAssertSExt(MRI.cloneVirtualRegister(SrcReg), SrcReg,
117378dcff48SMatt Arsenault NarrowTy.getScalarSizeInBits())
117478dcff48SMatt Arsenault .getReg(0);
117578dcff48SMatt Arsenault break;
117678dcff48SMatt Arsenault }
117778dcff48SMatt Arsenault default:
117878dcff48SMatt Arsenault return SrcReg;
117978dcff48SMatt Arsenault }
118078dcff48SMatt Arsenault }
118178dcff48SMatt Arsenault
1182fa0b93b5SMatt Arsenault /// Check if we can use a basic COPY instruction between the two types.
1183fa0b93b5SMatt Arsenault ///
1184fa0b93b5SMatt Arsenault /// We're currently building on top of the infrastructure using MVT, which loses
1185fa0b93b5SMatt Arsenault /// pointer information in the CCValAssign. We accept copies from physical
1186fa0b93b5SMatt Arsenault /// registers that have been reported as integers if it's to an equivalent sized
1187fa0b93b5SMatt Arsenault /// pointer LLT.
isCopyCompatibleType(LLT SrcTy,LLT DstTy)1188fa0b93b5SMatt Arsenault static bool isCopyCompatibleType(LLT SrcTy, LLT DstTy) {
1189fa0b93b5SMatt Arsenault if (SrcTy == DstTy)
1190fa0b93b5SMatt Arsenault return true;
1191fa0b93b5SMatt Arsenault
1192fa0b93b5SMatt Arsenault if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1193fa0b93b5SMatt Arsenault return false;
1194fa0b93b5SMatt Arsenault
1195fa0b93b5SMatt Arsenault SrcTy = SrcTy.getScalarType();
1196fa0b93b5SMatt Arsenault DstTy = DstTy.getScalarType();
1197fa0b93b5SMatt Arsenault
1198fa0b93b5SMatt Arsenault return (SrcTy.isPointer() && DstTy.isScalar()) ||
1199fa0b93b5SMatt Arsenault (DstTy.isScalar() && SrcTy.isPointer());
1200fa0b93b5SMatt Arsenault }
1201fa0b93b5SMatt Arsenault
assignValueToReg(Register ValVReg,Register PhysReg,CCValAssign VA)120278dcff48SMatt Arsenault void CallLowering::IncomingValueHandler::assignValueToReg(Register ValVReg,
120378dcff48SMatt Arsenault Register PhysReg,
12048bde5e58SAmara Emerson CCValAssign VA) {
1205fa0b93b5SMatt Arsenault const MVT LocVT = VA.getLocVT();
1206fa0b93b5SMatt Arsenault const LLT LocTy(LocVT);
1207fa0b93b5SMatt Arsenault const LLT RegTy = MRI.getType(ValVReg);
120878dcff48SMatt Arsenault
1209fa0b93b5SMatt Arsenault if (isCopyCompatibleType(RegTy, LocTy)) {
121078dcff48SMatt Arsenault MIRBuilder.buildCopy(ValVReg, PhysReg);
121178dcff48SMatt Arsenault return;
121278dcff48SMatt Arsenault }
121378dcff48SMatt Arsenault
121478dcff48SMatt Arsenault auto Copy = MIRBuilder.buildCopy(LocTy, PhysReg);
1215fa0b93b5SMatt Arsenault auto Hint = buildExtensionHint(VA, Copy.getReg(0), RegTy);
121678dcff48SMatt Arsenault MIRBuilder.buildTrunc(ValVReg, Hint);
121778dcff48SMatt Arsenault }
1218