//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C),
      CallOrPrologue(Unknown) {
  // No stack is used.
  StackOffset = 0;
  MaxStackArgAlign = 1;

  clearByValRegsInfo();
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          int MinSize, int MinAlign,
                          ISD::ArgFlagsTy ArgFlags) {
  unsigned Align = ArgFlags.getByValAlign();
  unsigned Size  = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > (int)Align)
    Align = MinAlign;
  ensureMaxAlignment(Align);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI/32] |= 1 << (*AI&31);
}

bool CCState::IsShadowAllocatedReg(unsigned Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs) {
    if (ValAssign.isRegLoc()) {
      for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
           AI.isValid(); ++AI) {
        if (*AI == Reg)
          return false;
      }
    }
  }
  return true;
}
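
// The Analyze* entry points below all drive a CCAssignFn callback, normally
// generated by TableGen from the target's CallingConv.td. Conceptually it is
// just a function with the following shape (illustrative sketch only; the
// register array contents and the 4-byte stack slot are assumptions, not part
// of this file):
//
//   static bool CC_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
//                         CCValAssign::LocInfo LocInfo,
//                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
//     static const MCPhysReg ArgRegs[] = { /* the target's argument GPRs */ };
//     if (unsigned Reg = State.AllocateReg(ArgRegs)) {
//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
//       return false;  // Location assigned; no further rules needed.
//     }
//     unsigned Offset = State.AllocateStack(4, 4);
//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
//     return false;    // Returning true would mean "unhandled type".
//   }
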
/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Formal argument #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Return operand #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}
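
// A target's call lowering typically runs one of the Analyze* routines above
// and then walks the resulting CCValAssign list, roughly as in this sketch
// (illustrative only; CC_MyTarget and the elided lowering steps are
// assumptions):
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeCallOperands(Outs, CC_MyTarget);
//   unsigned NumBytes = CCInfo.getNextStackOffset(); // stack space to reserve
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc())
//       /* copy the value into VA.getLocReg() */;
//     else
//       /* store the value at VA.getLocMemOffset() from the stack pointer */;
//   }
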
/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}

void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  unsigned SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}
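
// The musttail analysis below is typically driven from a target's
// LowerFormalArguments when a variadic function contains a musttail call.
// A rough usage sketch (illustrative only; CC_MyTarget, MyFuncInfo and the
// chosen RegParmTypes are assumptions):
//
//   SmallVector<MVT, 2> RegParmTypes;
//   RegParmTypes.push_back(MVT::i32);
//   RegParmTypes.push_back(MVT::f64);
//   SmallVectorImpl<ForwardedRegister> &Forwards =
//       MyFuncInfo->getForwardedMustTailRegParms();
//   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes,
//                                            CC_MyTarget);
//
// Each ForwardedRegister then names a virtual register holding the incoming
// value of an otherwise-unused argument register; the musttail call site
// copies it back into the same physical register.
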
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];
    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;
    bool RegLoc1 = Loc1.isRegLoc();
    if (RegLoc1 != Loc2.isRegLoc())
      return false;
    if (RegLoc1) {
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;
    } else {
      if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
        return false;
    }
  }
  return true;
}
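
// resultsCompatible is normally consulted from a target's tail-call
// eligibility check, e.g. (illustrative only; RetCC_MyTarget is an
// assumption):
//
//   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
//                                   RetCC_MyTarget, RetCC_MyTarget))
//     return false;  // Caller and callee return values in different
//                    // locations, so the call cannot be tail-called.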