1 //===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the CCState class, used for lowering and implementing
10 // calling conventions.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/CallingConvLower.h"
15 #include "llvm/CodeGen/MachineFrameInfo.h"
16 #include "llvm/CodeGen/MachineRegisterInfo.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/CodeGen/TargetRegisterInfo.h"
19 #include "llvm/CodeGen/TargetSubtargetInfo.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/SaveAndRestore.h"
24 #include "llvm/Support/raw_ostream.h"
25 #include <algorithm>
26 
27 using namespace llvm;
28 
29 CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
30                  SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
31     : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
32       TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
33   // No stack is used.
34   StackOffset = 0;
35 
36   clearByValRegsInfo();
  // One bit per physical register, packed into 32-bit words.
  UsedRegs.resize((TRI.getNumRegs() + 31) / 32);
38 }
39 
40 /// Allocate space on the stack large enough to pass an argument by value.
41 /// The size and alignment information of the argument is encoded in
42 /// its parameter attribute.
43 void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
44                           CCValAssign::LocInfo LocInfo, int MinSize,
45                           int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
  Align MinAlign(MinAlignment);
  Align Alignment(ArgFlags.getByValAlign());
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Alignment)
    Alignment = MinAlign;
  ensureMaxAlignment(Alignment);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size,
                                                     Alignment.value());
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Alignment.value());
57   addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
58 }
59 
60 /// Mark a register and all of its aliases as allocated.
61 void CCState::MarkAllocated(unsigned Reg) {
62   for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
63     UsedRegs[*AI/32] |= 1 << (*AI&31);
64 }
65 
/// Return true if \p Reg was allocated only as a "shadow" of some other
/// assignment: it is marked as used, but no CCValAssign in Locs names it (or
/// any register aliasing it) as a location register. For example, under a
/// Win64-style convention where each argument slot pairs a GPR with an XMM
/// register, assigning the XMM register also shadow-allocates the paired GPR.
bool CCState::IsShadowAllocatedReg(unsigned Reg) const {
67   if (!isAllocated(Reg))
68     return false;
69 
70   for (auto const &ValAssign : Locs) {
71     if (ValAssign.isRegLoc()) {
72       for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
73            AI.isValid(); ++AI) {
74         if (*AI == Reg)
75           return false;
76       }
77     }
78   }
79   return true;
80 }
81 
82 /// Analyze an array of argument values,
83 /// incorporating info about the formals into this state.
84 void
85 CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
86                                 CCAssignFn Fn) {
87   unsigned NumArgs = Ins.size();
88 
89   for (unsigned i = 0; i != NumArgs; ++i) {
90     MVT ArgVT = Ins[i].VT;
91     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
92     if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
93 #ifndef NDEBUG
94       dbgs() << "Formal argument #" << i << " has unhandled type "
95              << EVT(ArgVT).getEVTString() << '\n';
96 #endif
97       llvm_unreachable(nullptr);
98     }
99   }
100 }
101 
102 /// Analyze the return values of a function, returning true if the return can
103 /// be performed without sret-demotion and false otherwise.
104 bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
105                           CCAssignFn Fn) {
106   // Determine which register each value should be copied into.
107   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
108     MVT VT = Outs[i].VT;
109     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
110     if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
111       return false;
112   }
113   return true;
114 }
115 
/// Analyze the return values of a return instruction, incorporating info
/// about the result values into this state.
118 void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
119                             CCAssignFn Fn) {
120   // Determine which register each value should be copied into.
121   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
122     MVT VT = Outs[i].VT;
123     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
124     if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
125 #ifndef NDEBUG
126       dbgs() << "Return operand #" << i << " has unhandled type "
127              << EVT(VT).getEVTString() << '\n';
128 #endif
129       llvm_unreachable(nullptr);
130     }
131   }
132 }
133 
134 /// Analyze the outgoing arguments to a call,
135 /// incorporating info about the passed values into this state.
136 void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
137                                   CCAssignFn Fn) {
138   unsigned NumOps = Outs.size();
139   for (unsigned i = 0; i != NumOps; ++i) {
140     MVT ArgVT = Outs[i].VT;
141     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
142     if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
143 #ifndef NDEBUG
144       dbgs() << "Call operand #" << i << " has unhandled type "
145              << EVT(ArgVT).getEVTString() << '\n';
146 #endif
147       llvm_unreachable(nullptr);
148     }
149   }
150 }
151 
152 /// Same as above except it takes vectors of types and argument flags.
153 void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
154                                   SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
155                                   CCAssignFn Fn) {
156   unsigned NumOps = ArgVTs.size();
157   for (unsigned i = 0; i != NumOps; ++i) {
158     MVT ArgVT = ArgVTs[i];
159     ISD::ArgFlagsTy ArgFlags = Flags[i];
160     if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
161 #ifndef NDEBUG
162       dbgs() << "Call operand #" << i << " has unhandled type "
163              << EVT(ArgVT).getEVTString() << '\n';
164 #endif
165       llvm_unreachable(nullptr);
166     }
167   }
168 }
169 
/// Analyze the return values of a call, incorporating info about the
/// returned values into this state.
172 void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
173                                 CCAssignFn Fn) {
174   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
175     MVT VT = Ins[i].VT;
176     ISD::ArgFlagsTy Flags = Ins[i].Flags;
177     if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
178 #ifndef NDEBUG
179       dbgs() << "Call result #" << i << " has unhandled type "
180              << EVT(VT).getEVTString() << '\n';
181 #endif
182       llvm_unreachable(nullptr);
183     }
184   }
185 }
186 
187 /// Same as above except it's specialized for calls that produce a single value.
188 void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
189   if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
190 #ifndef NDEBUG
191     dbgs() << "Call result has unhandled type "
192            << EVT(VT).getEVTString() << '\n';
193 #endif
194     llvm_unreachable(nullptr);
195   }
196 }
197 
/// Return true if values of type \p VT might be passed in a register under
/// calling convention \p CC. Used by getRemainingRegParmsForType to decide
/// whether to set the 'inreg' flag before probing the convention.
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
199   if (VT.isVector())
200     return true; // Assume -msse-regparm might be in effect.
201   if (!VT.isInteger())
202     return false;
203   if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
204     return true;
205   return false;
206 }
207 
208 void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
209                                           MVT VT, CCAssignFn Fn) {
210   unsigned SavedStackOffset = StackOffset;
211   llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
212   unsigned NumLocs = Locs.size();
213 
214   // Set the 'inreg' flag if it is used for this calling convention.
215   ISD::ArgFlagsTy Flags;
216   if (isValueTypeInRegForCC(CallingConv, VT))
217     Flags.setInReg();
218 
219   // Allocate something of this value type repeatedly until we get assigned a
220   // location in memory.
221   bool HaveRegParm = true;
222   while (HaveRegParm) {
223     if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
224 #ifndef NDEBUG
225       dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
226              << " while computing remaining regparms\n";
227 #endif
228       llvm_unreachable(nullptr);
229     }
230     HaveRegParm = Locs.back().isRegLoc();
231   }
232 
233   // Copy all the registers from the value locations we added.
234   assert(NumLocs < Locs.size() && "CC assignment failed to add location");
235   for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
236     if (Locs[I].isRegLoc())
237       Regs.push_back(MCPhysReg(Locs[I].getLocReg()));
238 
  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, e.g.
  // when i64 and f64 are both passed in GPRs.
242   StackOffset = SavedStackOffset;
243   MaxStackArgAlign = SavedMaxStackArgAlign;
244   Locs.resize(NumLocs);
245 }
246 
247 void CCState::analyzeMustTailForwardedRegisters(
248     SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
249     CCAssignFn Fn) {
  // Calling conventions often do not use register parameters for variadic
  // functions, so pretend this call is not variadic in order to see all the
  // registers that a non-variadic call could use.
253   SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
254   SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);
255 
256   for (MVT RegVT : RegParmTypes) {
257     SmallVector<MCPhysReg, 8> RemainingRegs;
258     getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
259     const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
260     const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
261     for (MCPhysReg PReg : RemainingRegs) {
262       unsigned VReg = MF.addLiveIn(PReg, RC);
263       Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
264     }
265   }
266 }
267 
268 bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
269                                 CallingConv::ID CallerCC, MachineFunction &MF,
270                                 LLVMContext &C,
271                                 const SmallVectorImpl<ISD::InputArg> &Ins,
272                                 CCAssignFn CalleeFn, CCAssignFn CallerFn) {
273   if (CalleeCC == CallerCC)
274     return true;
275   SmallVector<CCValAssign, 4> RVLocs1;
276   CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
277   CCInfo1.AnalyzeCallResult(Ins, CalleeFn);
278 
279   SmallVector<CCValAssign, 4> RVLocs2;
280   CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
281   CCInfo2.AnalyzeCallResult(Ins, CallerFn);
282 
283   if (RVLocs1.size() != RVLocs2.size())
284     return false;
285   for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
286     const CCValAssign &Loc1 = RVLocs1[I];
287     const CCValAssign &Loc2 = RVLocs2[I];
288     if (Loc1.getLocInfo() != Loc2.getLocInfo())
289       return false;
290     bool RegLoc1 = Loc1.isRegLoc();
291     if (RegLoc1 != Loc2.isRegLoc())
292       return false;
293     if (RegLoc1) {
294       if (Loc1.getLocReg() != Loc2.getLocReg())
295         return false;
296     } else {
297       if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
298         return false;
299     }
300   }
301   return true;
302 }
303