//===- llvm/lib/Target/X86/X86CallLowering.cpp - Call lowering ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "X86CallLowering.h"
#include "X86CallingConv.h"
#include "X86ISelLowering.h"
#include "X86InstrInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

// The base CallLowering keeps a pointer to the target's TargetLowering; all
// X86-specific state lives in the X86TargetLowering instance passed in here.
X86CallLowering::X86CallLowering(const X86TargetLowering &TLI)
    : CallLowering(&TLI) {}

namespace {

struct X86OutgoingValueAssigner : public CallLowering::OutgoingValueAssigner { 56 private: 57 uint64_t StackSize = 0; 58 unsigned NumXMMRegs = 0; 59 60 public: 61 uint64_t getStackSize() { return StackSize; } 62 unsigned getNumXmmRegs() { return NumXMMRegs; } 63 64 X86OutgoingValueAssigner(CCAssignFn *AssignFn_) 65 : CallLowering::OutgoingValueAssigner(AssignFn_) {} 66 67 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT, 68 CCValAssign::LocInfo LocInfo, 69 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags, 70 CCState &State) override { 71 bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State); 72 StackSize = State.getNextStackOffset(); 73 74 static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2, 75 X86::XMM3, X86::XMM4, X86::XMM5, 76 X86::XMM6, X86::XMM7}; 77 if (!Info.IsFixed) 78 NumXMMRegs = State.getFirstUnallocated(XMMArgRegs); 79 80 return Res; 81 } 82 }; 83 84 struct X86OutgoingValueHandler : public CallLowering::OutgoingValueHandler { 85 X86OutgoingValueHandler(MachineIRBuilder &MIRBuilder, 86 MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) 87 : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), 88 DL(MIRBuilder.getMF().getDataLayout()), 89 STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {} 90 91 Register getStackAddress(uint64_t Size, int64_t Offset, 92 MachinePointerInfo &MPO, 93 ISD::ArgFlagsTy Flags) override { 94 LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0)); 95 LLT SType = LLT::scalar(DL.getPointerSizeInBits(0)); 96 auto SPReg = 97 MIRBuilder.buildCopy(p0, STI.getRegisterInfo()->getStackRegister()); 98 99 auto OffsetReg = MIRBuilder.buildConstant(SType, Offset); 100 101 auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg); 102 103 MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); 104 return AddrReg.getReg(0); 105 } 106 107 void assignValueToReg(Register ValVReg, Register PhysReg, 108 CCValAssign VA) override { 109 MIB.addUse(PhysReg, RegState::Implicit); 110 Register 
ExtReg = extendRegister(ValVReg, VA); 111 MIRBuilder.buildCopy(PhysReg, ExtReg); 112 } 113 114 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, 115 MachinePointerInfo &MPO, CCValAssign &VA) override { 116 MachineFunction &MF = MIRBuilder.getMF(); 117 Register ExtReg = extendRegister(ValVReg, VA); 118 119 auto *MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy, 120 inferAlignFromPtrInfo(MF, MPO)); 121 MIRBuilder.buildStore(ExtReg, Addr, *MMO); 122 } 123 124 protected: 125 MachineInstrBuilder &MIB; 126 const DataLayout &DL; 127 const X86Subtarget &STI; 128 }; 129 130 } // end anonymous namespace 131 132 bool X86CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, 133 const Value *Val, ArrayRef<Register> VRegs, 134 FunctionLoweringInfo &FLI) const { 135 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && 136 "Return value without a vreg"); 137 auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0); 138 139 if (!VRegs.empty()) { 140 MachineFunction &MF = MIRBuilder.getMF(); 141 const Function &F = MF.getFunction(); 142 MachineRegisterInfo &MRI = MF.getRegInfo(); 143 const DataLayout &DL = MF.getDataLayout(); 144 145 ArgInfo OrigRetInfo(VRegs, Val->getType(), 0); 146 setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F); 147 148 SmallVector<ArgInfo, 4> SplitRetInfos; 149 splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, F.getCallingConv()); 150 151 X86OutgoingValueAssigner Assigner(RetCC_X86); 152 X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB); 153 if (!determineAndHandleAssignments(Handler, Assigner, SplitRetInfos, 154 MIRBuilder, F.getCallingConv(), 155 F.isVarArg())) 156 return false; 157 } 158 159 MIRBuilder.insertInstr(MIB); 160 return true; 161 } 162 163 namespace { 164 165 struct X86IncomingValueHandler : public CallLowering::IncomingValueHandler { 166 X86IncomingValueHandler(MachineIRBuilder &MIRBuilder, 167 MachineRegisterInfo &MRI) 168 : IncomingValueHandler(MIRBuilder, MRI), 169 
DL(MIRBuilder.getMF().getDataLayout()) {} 170 171 Register getStackAddress(uint64_t Size, int64_t Offset, 172 MachinePointerInfo &MPO, 173 ISD::ArgFlagsTy Flags) override { 174 auto &MFI = MIRBuilder.getMF().getFrameInfo(); 175 176 // Byval is assumed to be writable memory, but other stack passed arguments 177 // are not. 178 const bool IsImmutable = !Flags.isByVal(); 179 180 int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable); 181 MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); 182 183 return MIRBuilder 184 .buildFrameIndex(LLT::pointer(0, DL.getPointerSizeInBits(0)), FI) 185 .getReg(0); 186 } 187 188 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy, 189 MachinePointerInfo &MPO, CCValAssign &VA) override { 190 MachineFunction &MF = MIRBuilder.getMF(); 191 auto *MMO = MF.getMachineMemOperand( 192 MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemTy, 193 inferAlignFromPtrInfo(MF, MPO)); 194 MIRBuilder.buildLoad(ValVReg, Addr, *MMO); 195 } 196 197 void assignValueToReg(Register ValVReg, Register PhysReg, 198 CCValAssign VA) override { 199 markPhysRegUsed(PhysReg); 200 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA); 201 } 202 203 /// How the physical register gets marked varies between formal 204 /// parameters (it's a basic-block live-in), and a call instruction 205 /// (it's an implicit-def of the BL). 
206 virtual void markPhysRegUsed(unsigned PhysReg) = 0; 207 208 protected: 209 const DataLayout &DL; 210 }; 211 212 struct FormalArgHandler : public X86IncomingValueHandler { 213 FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI) 214 : X86IncomingValueHandler(MIRBuilder, MRI) {} 215 216 void markPhysRegUsed(unsigned PhysReg) override { 217 MIRBuilder.getMRI()->addLiveIn(PhysReg); 218 MIRBuilder.getMBB().addLiveIn(PhysReg); 219 } 220 }; 221 222 struct CallReturnHandler : public X86IncomingValueHandler { 223 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, 224 MachineInstrBuilder &MIB) 225 : X86IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {} 226 227 void markPhysRegUsed(unsigned PhysReg) override { 228 MIB.addDef(PhysReg, RegState::Implicit); 229 } 230 231 protected: 232 MachineInstrBuilder &MIB; 233 }; 234 235 } // end anonymous namespace 236 237 bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, 238 const Function &F, 239 ArrayRef<ArrayRef<Register>> VRegs, 240 FunctionLoweringInfo &FLI) const { 241 if (F.arg_empty()) 242 return true; 243 244 // TODO: handle variadic function 245 if (F.isVarArg()) 246 return false; 247 248 MachineFunction &MF = MIRBuilder.getMF(); 249 MachineRegisterInfo &MRI = MF.getRegInfo(); 250 auto DL = MF.getDataLayout(); 251 252 SmallVector<ArgInfo, 8> SplitArgs; 253 unsigned Idx = 0; 254 for (const auto &Arg : F.args()) { 255 // TODO: handle not simple cases. 
256 if (Arg.hasAttribute(Attribute::ByVal) || 257 Arg.hasAttribute(Attribute::InReg) || 258 Arg.hasAttribute(Attribute::StructRet) || 259 Arg.hasAttribute(Attribute::SwiftSelf) || 260 Arg.hasAttribute(Attribute::SwiftError) || 261 Arg.hasAttribute(Attribute::Nest) || VRegs[Idx].size() > 1) 262 return false; 263 264 ArgInfo OrigArg(VRegs[Idx], Arg.getType(), Idx); 265 setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F); 266 splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv()); 267 Idx++; 268 } 269 270 MachineBasicBlock &MBB = MIRBuilder.getMBB(); 271 if (!MBB.empty()) 272 MIRBuilder.setInstr(*MBB.begin()); 273 274 X86OutgoingValueAssigner Assigner(CC_X86); 275 FormalArgHandler Handler(MIRBuilder, MRI); 276 if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder, 277 F.getCallingConv(), F.isVarArg())) 278 return false; 279 280 // Move back to the end of the basic block. 281 MIRBuilder.setMBB(MBB); 282 283 return true; 284 } 285 286 bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, 287 CallLoweringInfo &Info) const { 288 MachineFunction &MF = MIRBuilder.getMF(); 289 const Function &F = MF.getFunction(); 290 MachineRegisterInfo &MRI = MF.getRegInfo(); 291 const DataLayout &DL = F.getParent()->getDataLayout(); 292 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>(); 293 const TargetInstrInfo &TII = *STI.getInstrInfo(); 294 const X86RegisterInfo *TRI = STI.getRegisterInfo(); 295 296 // Handle only Linux C, X86_64_SysV calling conventions for now. 297 if (!STI.isTargetLinux() || !(Info.CallConv == CallingConv::C || 298 Info.CallConv == CallingConv::X86_64_SysV)) 299 return false; 300 301 unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); 302 auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown); 303 304 // Create a temporarily-floating call instruction so we can add the implicit 305 // uses of arg registers. 306 bool Is64Bit = STI.is64Bit(); 307 unsigned CallOpc = Info.Callee.isReg() 308 ? (Is64Bit ? 
X86::CALL64r : X86::CALL32r) 309 : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32); 310 311 auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc) 312 .add(Info.Callee) 313 .addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv)); 314 315 SmallVector<ArgInfo, 8> SplitArgs; 316 for (const auto &OrigArg : Info.OrigArgs) { 317 318 // TODO: handle not simple cases. 319 if (OrigArg.Flags[0].isByVal()) 320 return false; 321 322 if (OrigArg.Regs.size() > 1) 323 return false; 324 325 splitToValueTypes(OrigArg, SplitArgs, DL, Info.CallConv); 326 } 327 // Do the actual argument marshalling. 328 X86OutgoingValueAssigner Assigner(CC_X86); 329 X86OutgoingValueHandler Handler(MIRBuilder, MRI, MIB); 330 if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder, 331 Info.CallConv, Info.IsVarArg)) 332 return false; 333 334 bool IsFixed = Info.OrigArgs.empty() ? true : Info.OrigArgs.back().IsFixed; 335 if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(Info.CallConv)) { 336 // From AMD64 ABI document: 337 // For calls that may call functions that use varargs or stdargs 338 // (prototype-less calls or calls to functions containing ellipsis (...) in 339 // the declaration) %al is used as hidden argument to specify the number 340 // of SSE registers used. The contents of %al do not need to match exactly 341 // the number of registers, but must be an ubound on the number of SSE 342 // registers used and is in the range 0 - 8 inclusive. 343 344 MIRBuilder.buildInstr(X86::MOV8ri) 345 .addDef(X86::AL) 346 .addImm(Assigner.getNumXmmRegs()); 347 MIB.addUse(X86::AL, RegState::Implicit); 348 } 349 350 // Now we can add the actual call instruction to the correct basic block. 351 MIRBuilder.insertInstr(MIB); 352 353 // If Callee is a reg, since it is used by a target specific 354 // instruction, it must have a register class matching the 355 // constraint of that instruction. 
356 if (Info.Callee.isReg()) 357 MIB->getOperand(0).setReg(constrainOperandRegClass( 358 MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(), 359 *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee, 360 0)); 361 362 // Finally we can copy the returned value back into its virtual-register. In 363 // symmetry with the arguments, the physical register must be an 364 // implicit-define of the call instruction. 365 366 if (!Info.OrigRet.Ty->isVoidTy()) { 367 if (Info.OrigRet.Regs.size() > 1) 368 return false; 369 370 SplitArgs.clear(); 371 SmallVector<Register, 8> NewRegs; 372 373 splitToValueTypes(Info.OrigRet, SplitArgs, DL, Info.CallConv); 374 375 X86OutgoingValueAssigner Assigner(RetCC_X86); 376 CallReturnHandler Handler(MIRBuilder, MRI, MIB); 377 if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs, MIRBuilder, 378 Info.CallConv, Info.IsVarArg)) 379 return false; 380 381 if (!NewRegs.empty()) 382 MIRBuilder.buildMerge(Info.OrigRet.Regs[0], NewRegs); 383 } 384 385 CallSeqStart.addImm(Assigner.getStackSize()) 386 .addImm(0 /* see getFrameTotalSize */) 387 .addImm(0 /* see getFrameAdjustment */); 388 389 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); 390 MIRBuilder.buildInstr(AdjStackUp) 391 .addImm(Assigner.getStackSize()) 392 .addImm(0 /* NumBytesForCalleeToPop */); 393 394 return true; 395 } 396