//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

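// Assign a single value to the location chosen by the calling convention:
// either a physical register or a stack slot.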
bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

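// Assign every argument in Args to the locations in ArgLocs. Values that the
// calling convention splits across several registers are first broken into
// register-sized virtual registers and then assigned piece by piece through
// handleSplit.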
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg, VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

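// Handler for values returned by a call. Instead of adding the physical
// register as a block live-in, record it as an implicit def on the call
// instruction.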
class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

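// Copy an incoming argument from its physical register into ValVReg. f64
// values passed in a pair of GPRs (A0-A3) are rebuilt with
// BuildPairF64/BuildPairF64_64, f32 values passed in a GPR are moved with
// MTC1, and extended integers are copied at the location type and truncated
// back to the value type.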
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  unsigned PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

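// Create a fixed stack object for an argument passed on the stack and return
// a pointer virtual register that addresses it.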
unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

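// Assign each piece of a split argument to its own virtual register, then
// merge the pieces, least significant first, into the original register.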
bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

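// Copy ValVReg into the physical register chosen by the calling convention.
// f64 values passed in a pair of GPRs (A0-A3) are split with
// ExtractElementF64/ExtractElementF64_64, f32 values passed in a GPR are
// moved with MFC1, and integers are extended to the location type before
// being copied.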
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  unsigned PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

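// Compute the address of an outgoing stack argument as SP plus the location's
// offset and build the corresponding store memory operand.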
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

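// Extend ValReg to the location type according to the assignment's LocInfo
// and return the extended register.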
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being larger than RegisterVT does not mean loss of information: the
  // value is simply split into multiple registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

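// Replace the LocInfo of each assignment with one recomputed from the
// argument's register type, original value type and flags.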
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

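// Lower a return value: analyze it with the Mips calling convention, copy
// each piece into its return register through OutgoingValueHandler, and emit
// RetRA.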
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

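// Lower incoming formal arguments. Vararg functions and unsupported argument
// types are rejected; otherwise the arguments are analyzed with the Mips
// calling convention and materialized through IncomingValueHandler.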
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

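// Lower an outgoing call. Only the C calling convention and direct callees
// are handled: the call is emitted as a JAL inside an
// ADJCALLSTACKDOWN/ADJCALLSTACKUP sequence, outgoing arguments are assigned
// through OutgoingValueHandler, and results are read back through
// CallReturnHandler.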
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  // FIXME: Add support for pic calling sequences, long call sequences for O32,
  //       N32 and N64. First handle the case when Callee.isReg().
  if (Callee.isReg())
    return false;

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

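// Expand each argument into the ISD::InputArg/OutputArg entries that the
// calling-convention analysis expects: one entry per register of the type
// used for the calling convention, all tagged with the original argument
// index.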
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: Perform structure and array splitting. For now we only handle
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}