//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

// Dispatch one assigned value to a register or a stack location, depending
// on the location kind the calling-convention analysis chose for it.
// Returns false for location kinds this handler does not support.
bool MipsCallLowering::MipsHandler::assign(unsigned VReg,
                                           const CCValAssign &VA) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

namespace {
/// Handler for values flowing into the current function: formal arguments
/// arriving in physical registers or in the caller's stack frame.
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

  // Assign each Args[i] according to the matching ArgLocs[i].
  bool handle(ArrayRef<CCValAssign> ArgLocs,
              ArrayRef<CallLowering::ArgInfo> Args);

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  // For formal arguments the incoming physical register becomes a live-in
  // of the current block; CallReturnHandler overrides this to record the
  // register on a call instruction instead.
  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  // Load the value described by VA from its stack slot into Val.
  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};
67 class CallReturnHandler : public IncomingValueHandler { 68 public: 69 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, 70 MachineInstrBuilder &MIB) 71 : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {} 72 73 private: 74 void markPhysRegUsed(unsigned PhysReg) override { 75 MIB.addDef(PhysReg, RegState::Implicit); 76 } 77 78 MachineInstrBuilder &MIB; 79 }; 80 81 } // end anonymous namespace 82 83 void IncomingValueHandler::assignValueToReg(unsigned ValVReg, 84 const CCValAssign &VA) { 85 unsigned PhysReg = VA.getLocReg(); 86 switch (VA.getLocInfo()) { 87 case CCValAssign::LocInfo::SExt: 88 case CCValAssign::LocInfo::ZExt: 89 case CCValAssign::LocInfo::AExt: { 90 auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg); 91 MIRBuilder.buildTrunc(ValVReg, Copy); 92 break; 93 } 94 default: 95 MIRBuilder.buildCopy(ValVReg, PhysReg); 96 break; 97 } 98 markPhysRegUsed(PhysReg); 99 } 100 101 unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA, 102 MachineMemOperand *&MMO) { 103 unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; 104 unsigned Offset = VA.getLocMemOffset(); 105 MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo(); 106 107 int FI = MFI.CreateFixedObject(Size, Offset, true); 108 MachinePointerInfo MPO = 109 MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); 110 MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOLoad, 111 Size, /* Alignment */ 0); 112 113 unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32)); 114 MIRBuilder.buildFrameIndex(AddrReg, FI); 115 116 return AddrReg; 117 } 118 119 void IncomingValueHandler::assignValueToAddress(unsigned ValVReg, 120 const CCValAssign &VA) { 121 if (VA.getLocInfo() == CCValAssign::SExt || 122 VA.getLocInfo() == CCValAssign::ZExt || 123 VA.getLocInfo() == CCValAssign::AExt) { 124 unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32)); 125 buildLoad(LoadReg, VA); 126 
MIRBuilder.buildTrunc(ValVReg, LoadReg); 127 } else 128 buildLoad(ValVReg, VA); 129 } 130 131 bool IncomingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs, 132 ArrayRef<CallLowering::ArgInfo> Args) { 133 for (unsigned i = 0, ArgsSize = Args.size(); i < ArgsSize; ++i) { 134 if (!assign(Args[i].Reg, ArgLocs[i])) 135 return false; 136 } 137 return true; 138 } 139 140 namespace { 141 class OutgoingValueHandler : public MipsCallLowering::MipsHandler { 142 public: 143 OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, 144 MachineInstrBuilder &MIB) 145 : MipsHandler(MIRBuilder, MRI), MIB(MIB) {} 146 147 bool handle(ArrayRef<CCValAssign> ArgLocs, 148 ArrayRef<CallLowering::ArgInfo> Args); 149 150 private: 151 void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override; 152 153 unsigned getStackAddress(const CCValAssign &VA, 154 MachineMemOperand *&MMO) override; 155 156 void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override; 157 158 unsigned extendRegister(unsigned ValReg, const CCValAssign &VA); 159 160 MachineInstrBuilder &MIB; 161 }; 162 } // end anonymous namespace 163 164 void OutgoingValueHandler::assignValueToReg(unsigned ValVReg, 165 const CCValAssign &VA) { 166 unsigned PhysReg = VA.getLocReg(); 167 unsigned ExtReg = extendRegister(ValVReg, VA); 168 MIRBuilder.buildCopy(PhysReg, ExtReg); 169 MIB.addUse(PhysReg, RegState::Implicit); 170 } 171 172 unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA, 173 MachineMemOperand *&MMO) { 174 LLT p0 = LLT::pointer(0, 32); 175 LLT s32 = LLT::scalar(32); 176 unsigned SPReg = MRI.createGenericVirtualRegister(p0); 177 MIRBuilder.buildCopy(SPReg, Mips::SP); 178 179 unsigned OffsetReg = MRI.createGenericVirtualRegister(s32); 180 unsigned Offset = VA.getLocMemOffset(); 181 MIRBuilder.buildConstant(OffsetReg, Offset); 182 183 unsigned AddrReg = MRI.createGenericVirtualRegister(p0); 184 MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); 185 186 
MachinePointerInfo MPO = 187 MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); 188 unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; 189 MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOStore, 190 Size, /* Alignment */ 0); 191 192 return AddrReg; 193 } 194 195 void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg, 196 const CCValAssign &VA) { 197 MachineMemOperand *MMO; 198 unsigned Addr = getStackAddress(VA, MMO); 199 unsigned ExtReg = extendRegister(ValVReg, VA); 200 MIRBuilder.buildStore(ExtReg, Addr, *MMO); 201 } 202 203 unsigned OutgoingValueHandler::extendRegister(unsigned ValReg, 204 const CCValAssign &VA) { 205 LLT LocTy{VA.getLocVT()}; 206 switch (VA.getLocInfo()) { 207 case CCValAssign::SExt: { 208 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); 209 MIRBuilder.buildSExt(ExtReg, ValReg); 210 return ExtReg; 211 } 212 case CCValAssign::ZExt: { 213 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); 214 MIRBuilder.buildZExt(ExtReg, ValReg); 215 return ExtReg; 216 } 217 case CCValAssign::AExt: { 218 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); 219 MIRBuilder.buildAnyExt(ExtReg, ValReg); 220 return ExtReg; 221 } 222 // TODO : handle upper extends 223 case CCValAssign::Full: 224 return ValReg; 225 default: 226 break; 227 } 228 llvm_unreachable("unable to extend register"); 229 } 230 231 bool OutgoingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs, 232 ArrayRef<CallLowering::ArgInfo> Args) { 233 for (unsigned i = 0; i < Args.size(); ++i) { 234 if (!assign(Args[i].Reg, ArgLocs[i])) 235 return false; 236 } 237 return true; 238 } 239 240 static bool isSupportedType(Type *T) { 241 if (T->isIntegerTy() && T->getScalarSizeInBits() <= 32) 242 return true; 243 if (T->isPointerTy()) 244 return true; 245 return false; 246 } 247 248 CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT, 249 const ISD::ArgFlagsTy &Flags) { 250 if (VT.getSizeInBits() == 
RegisterVT.getSizeInBits()) 251 return CCValAssign::LocInfo::Full; 252 if (Flags.isSExt()) 253 return CCValAssign::LocInfo::SExt; 254 if (Flags.isZExt()) 255 return CCValAssign::LocInfo::ZExt; 256 return CCValAssign::LocInfo::AExt; 257 } 258 259 template <typename T> 260 void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs, 261 const SmallVectorImpl<T> &Arguments) { 262 for (unsigned i = 0; i < ArgLocs.size(); ++i) { 263 const CCValAssign &VA = ArgLocs[i]; 264 CCValAssign::LocInfo LocInfo = determineLocInfo( 265 Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags); 266 if (VA.isMemLoc()) 267 ArgLocs[i] = 268 CCValAssign::getMem(VA.getValNo(), VA.getValVT(), 269 VA.getLocMemOffset(), VA.getLocVT(), LocInfo); 270 else 271 ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), 272 VA.getLocReg(), VA.getLocVT(), LocInfo); 273 } 274 } 275 276 bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, 277 const Value *Val, 278 ArrayRef<unsigned> VRegs) const { 279 280 MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA); 281 282 if (Val != nullptr && !isSupportedType(Val->getType())) 283 return false; 284 285 if (!VRegs.empty()) { 286 MachineFunction &MF = MIRBuilder.getMF(); 287 const Function &F = MF.getFunction(); 288 const DataLayout &DL = MF.getDataLayout(); 289 const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>(); 290 LLVMContext &Ctx = Val->getType()->getContext(); 291 292 SmallVector<EVT, 4> SplitEVTs; 293 ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); 294 assert(VRegs.size() == SplitEVTs.size() && 295 "For each split Type there should be exactly one VReg."); 296 297 SmallVector<ArgInfo, 8> RetInfos; 298 SmallVector<unsigned, 8> OrigArgIndices; 299 300 for (unsigned i = 0; i < SplitEVTs.size(); ++i) { 301 ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)}; 302 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); 303 splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices); 304 } 305 
306 SmallVector<ISD::OutputArg, 8> Outs; 307 subTargetRegTypeForCallingConv( 308 MIRBuilder, RetInfos, OrigArgIndices, 309 [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, 310 unsigned origIdx, unsigned partOffs) { 311 Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs); 312 }); 313 314 SmallVector<CCValAssign, 16> ArgLocs; 315 MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, 316 F.getContext()); 317 CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn()); 318 setLocInfo(ArgLocs, Outs); 319 320 OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret); 321 if (!RetHandler.handle(ArgLocs, RetInfos)) { 322 return false; 323 } 324 } 325 MIRBuilder.insertInstr(Ret); 326 return true; 327 } 328 329 bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, 330 const Function &F, 331 ArrayRef<unsigned> VRegs) const { 332 333 // Quick exit if there aren't any args. 334 if (F.arg_empty()) 335 return true; 336 337 if (F.isVarArg()) { 338 return false; 339 } 340 341 for (auto &Arg : F.args()) { 342 if (!isSupportedType(Arg.getType())) 343 return false; 344 } 345 346 MachineFunction &MF = MIRBuilder.getMF(); 347 const DataLayout &DL = MF.getDataLayout(); 348 const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>(); 349 350 SmallVector<ArgInfo, 8> ArgInfos; 351 SmallVector<unsigned, 8> OrigArgIndices; 352 unsigned i = 0; 353 for (auto &Arg : F.args()) { 354 ArgInfo AInfo(VRegs[i], Arg.getType()); 355 setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F); 356 splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices); 357 ++i; 358 } 359 360 SmallVector<ISD::InputArg, 8> Ins; 361 subTargetRegTypeForCallingConv( 362 MIRBuilder, ArgInfos, OrigArgIndices, 363 [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, unsigned origIdx, 364 unsigned partOffs) { 365 Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs); 366 }); 367 368 SmallVector<CCValAssign, 16> ArgLocs; 369 MipsCCState CCInfo(F.getCallingConv(), 
F.isVarArg(), MF, ArgLocs, 370 F.getContext()); 371 372 const MipsTargetMachine &TM = 373 static_cast<const MipsTargetMachine &>(MF.getTarget()); 374 const MipsABIInfo &ABI = TM.getABI(); 375 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()), 376 1); 377 CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall()); 378 setLocInfo(ArgLocs, Ins); 379 380 IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo()); 381 if (!Handler.handle(ArgLocs, ArgInfos)) 382 return false; 383 384 return true; 385 } 386 387 bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, 388 CallingConv::ID CallConv, 389 const MachineOperand &Callee, 390 const ArgInfo &OrigRet, 391 ArrayRef<ArgInfo> OrigArgs) const { 392 393 if (CallConv != CallingConv::C) 394 return false; 395 396 for (auto &Arg : OrigArgs) { 397 if (!isSupportedType(Arg.Ty)) 398 return false; 399 if (Arg.Flags.isByVal() || Arg.Flags.isSRet()) 400 return false; 401 } 402 if (OrigRet.Reg && !isSupportedType(OrigRet.Ty)) 403 return false; 404 405 MachineFunction &MF = MIRBuilder.getMF(); 406 const Function &F = MF.getFunction(); 407 const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>(); 408 const MipsTargetMachine &TM = 409 static_cast<const MipsTargetMachine &>(MF.getTarget()); 410 const MipsABIInfo &ABI = TM.getABI(); 411 412 MachineInstrBuilder CallSeqStart = 413 MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN); 414 415 // FIXME: Add support for pic calling sequences, long call sequences for O32, 416 // N32 and N64. First handle the case when Callee.isReg(). 
417 if (Callee.isReg()) 418 return false; 419 420 MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL); 421 MIB.addDef(Mips::SP, RegState::Implicit); 422 MIB.add(Callee); 423 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); 424 MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv())); 425 426 TargetLowering::ArgListTy FuncOrigArgs; 427 FuncOrigArgs.reserve(OrigArgs.size()); 428 429 SmallVector<ArgInfo, 8> ArgInfos; 430 SmallVector<unsigned, 8> OrigArgIndices; 431 unsigned i = 0; 432 for (auto &Arg : OrigArgs) { 433 434 TargetLowering::ArgListEntry Entry; 435 Entry.Ty = Arg.Ty; 436 FuncOrigArgs.push_back(Entry); 437 438 splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices); 439 ++i; 440 } 441 442 SmallVector<ISD::OutputArg, 8> Outs; 443 subTargetRegTypeForCallingConv( 444 MIRBuilder, ArgInfos, OrigArgIndices, 445 [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, unsigned origIdx, 446 unsigned partOffs) { 447 Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs); 448 }); 449 450 SmallVector<CCValAssign, 8> ArgLocs; 451 MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, 452 F.getContext()); 453 454 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1); 455 const char *Call = Callee.isSymbol() ? 
Callee.getSymbolName() : nullptr; 456 CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call); 457 setLocInfo(ArgLocs, Outs); 458 459 OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB); 460 if (!RetHandler.handle(ArgLocs, ArgInfos)) { 461 return false; 462 } 463 464 unsigned NextStackOffset = CCInfo.getNextStackOffset(); 465 const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering(); 466 unsigned StackAlignment = TFL->getStackAlignment(); 467 NextStackOffset = alignTo(NextStackOffset, StackAlignment); 468 CallSeqStart.addImm(NextStackOffset).addImm(0); 469 470 MIRBuilder.insertInstr(MIB); 471 472 if (OrigRet.Reg) { 473 474 ArgInfos.clear(); 475 SmallVector<unsigned, 8> OrigRetIndices; 476 477 splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices); 478 479 SmallVector<ISD::InputArg, 8> Ins; 480 subTargetRegTypeForCallingConv( 481 MIRBuilder, ArgInfos, OrigRetIndices, 482 [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, 483 unsigned origIdx, unsigned partOffs) { 484 Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs); 485 }); 486 487 SmallVector<CCValAssign, 8> ArgLocs; 488 MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, 489 F.getContext()); 490 491 CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call); 492 setLocInfo(ArgLocs, Ins); 493 494 CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB); 495 if (!Handler.handle(ArgLocs, ArgInfos)) 496 return false; 497 } 498 499 MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0); 500 501 return true; 502 } 503 504 void MipsCallLowering::subTargetRegTypeForCallingConv( 505 MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args, 506 ArrayRef<unsigned> OrigArgIndices, const FunTy &PushBack) const { 507 MachineFunction &MF = MIRBuilder.getMF(); 508 const Function &F = MF.getFunction(); 509 const DataLayout &DL = F.getParent()->getDataLayout(); 510 const MipsTargetLowering &TLI = 
*getTLI<MipsTargetLowering>(); 511 512 unsigned ArgNo = 0; 513 for (auto &Arg : Args) { 514 515 EVT VT = TLI.getValueType(DL, Arg.Ty); 516 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), 517 F.getCallingConv(), VT); 518 519 ISD::ArgFlagsTy Flags = Arg.Flags; 520 Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL)); 521 522 PushBack(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo], 0); 523 524 ++ArgNo; 525 } 526 } 527 528 void MipsCallLowering::splitToValueTypes( 529 const ArgInfo &OrigArg, unsigned OriginalIndex, 530 SmallVectorImpl<ArgInfo> &SplitArgs, 531 SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const { 532 533 // TODO : perform structure and array split. For now we only deal with 534 // types that pass isSupportedType check. 535 SplitArgs.push_back(OrigArg); 536 SplitArgsOrigIndices.push_back(OriginalIndex); 537 } 538