//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

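// Split values are assigned in location order, which on big-endian targets
// puts the most significant piece first. Reverse the vector there so that
// index 0 always holds the least significant piece, as expected when
// merging/unmerging the original wide value.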
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

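// Assign each value to its locations. Values that the calling convention
// splits across several registers get one fresh virtual register per piece
// and are processed by handleSplit; everything else is assigned directly.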
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg, VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

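// Incoming register arguments: f64 values passed in A0..A3 arrive as a pair of
// GPRs and are rebuilt with BuildPairF64(_64); f32 values in a GPR are moved
// into an FPR with MTC1; everything else is copied from the physreg, with a
// truncate when the value was promoted to a wider location.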
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
  unsigned PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64
                                    : Mips::BuildPairF64)
        .addDef(ValVReg)
        .addUse(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(PhysReg + (STI.isLittle() ? 1 : 0))
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MTC1)
        .addDef(ValVReg)
        .addUse(PhysReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

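// Incoming stack arguments: create a fixed stack object at the location's
// offset, fill in a load memory operand for it, and return a pointer vreg
// produced by G_FRAME_INDEX.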
unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

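// A split incoming value: assign each piece to its location, then merge the
// pieces (least significant first) back into the original wide vreg.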
bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg, const EVT &VT) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

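// Outgoing register arguments: f64 values assigned to A0..A3 are split into a
// GPR pair with ExtractElementF64(_64); f32 values are moved into a GPR with
// MFC1; other values are extended to the location type if needed, copied into
// the physreg, and added as an implicit use of the call/return instruction.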
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  unsigned PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    unsigned ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

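// Outgoing stack arguments: compute SP + offset with a pointer add (G_GEP) and
// create the matching store memory operand.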
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

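// Extend ValReg to the location's type according to the assignment's LocInfo;
// Full assignments are passed through unchanged.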
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: Handle upper extends.
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

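// Only integer, pointer and floating point types are handled so far. For any
// other type the lower* entry points below return false, signalling that this
// function or call cannot yet be handled by GlobalISel.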
static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being wider than RegisterVT does not mean information is lost because
  // RegisterVT cannot hold VT; it means that VT is split into multiple
  // registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

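// Rebuild each CCValAssign with the LocInfo derived from the corresponding
// argument's value type and flags, keeping the original register or memory
// location.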
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

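// Lower a call: the emitted sequence is ADJCALLSTACKDOWN, the JAL/JALRPseudo
// with its outgoing arguments, a copy of the global base register into $gp for
// position independent calls to global values, handling of the returned value,
// and ADJCALLSTACKUP.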
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    unsigned CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal());
    if (!Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Mips::GP,
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

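// For every split argument create one ISD::InputArg/OutputArg per register
// that the calling convention uses for it, so that MipsCCState can run the
// standard calling convention functions over them.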
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: Perform structure and array splits. For now we only deal with
  // types that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}