1 //===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// This file implements the lowering of LLVM calls to machine code calls for 11 /// GlobalISel. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "MipsCallLowering.h" 16 #include "MipsCCState.h" 17 #include "MipsMachineFunction.h" 18 #include "MipsTargetMachine.h" 19 #include "llvm/CodeGen/Analysis.h" 20 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 21 22 using namespace llvm; 23 24 MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI) 25 : CallLowering(&TLI) {} 26 27 bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA, 28 const EVT &VT) { 29 if (VA.isRegLoc()) { 30 assignValueToReg(VReg, VA, VT); 31 } else if (VA.isMemLoc()) { 32 assignValueToAddress(VReg, VA); 33 } else { 34 return false; 35 } 36 return true; 37 } 38 39 bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs, 40 ArrayRef<CCValAssign> ArgLocs, 41 unsigned ArgLocsStartIndex, 42 const EVT &VT) { 43 for (unsigned i = 0; i < VRegs.size(); ++i) 44 if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT)) 45 return false; 46 return true; 47 } 48 49 void MipsCallLowering::MipsHandler::setLeastSignificantFirst( 50 SmallVectorImpl<Register> &VRegs) { 51 if (!MIRBuilder.getMF().getDataLayout().isLittleEndian()) 52 std::reverse(VRegs.begin(), VRegs.end()); 53 } 54 55 bool MipsCallLowering::MipsHandler::handle( 56 ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) { 57 SmallVector<Register, 4> VRegs; 58 unsigned SplitLength; 59 const Function &F = 
MIRBuilder.getMF().getFunction(); 60 const DataLayout &DL = F.getParent()->getDataLayout(); 61 const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>( 62 MIRBuilder.getMF().getSubtarget().getTargetLowering()); 63 64 for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size(); 65 ++ArgsIndex, ArgLocsIndex += SplitLength) { 66 EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty); 67 SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(), 68 F.getCallingConv(), VT); 69 assert(Args[ArgsIndex].Regs.size() == 1 && "Can't handle multple regs yet"); 70 71 if (SplitLength > 1) { 72 VRegs.clear(); 73 MVT RegisterVT = TLI.getRegisterTypeForCallingConv( 74 F.getContext(), F.getCallingConv(), VT); 75 for (unsigned i = 0; i < SplitLength; ++i) 76 VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT})); 77 78 if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0], 79 VT)) 80 return false; 81 } else { 82 if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT)) 83 return false; 84 } 85 } 86 return true; 87 } 88 89 namespace { 90 class IncomingValueHandler : public MipsCallLowering::MipsHandler { 91 public: 92 IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI) 93 : MipsHandler(MIRBuilder, MRI) {} 94 95 private: 96 void assignValueToReg(Register ValVReg, const CCValAssign &VA, 97 const EVT &VT) override; 98 99 Register getStackAddress(const CCValAssign &VA, 100 MachineMemOperand *&MMO) override; 101 102 void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override; 103 104 bool handleSplit(SmallVectorImpl<Register> &VRegs, 105 ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex, 106 Register ArgsReg, const EVT &VT) override; 107 108 virtual void markPhysRegUsed(unsigned PhysReg) { 109 MIRBuilder.getMRI()->addLiveIn(PhysReg); 110 MIRBuilder.getMBB().addLiveIn(PhysReg); 111 } 112 113 MachineInstrBuilder buildLoad(const DstOp &Res, const CCValAssign &VA) { 114 
MachineMemOperand *MMO; 115 Register Addr = getStackAddress(VA, MMO); 116 return MIRBuilder.buildLoad(Res, Addr, *MMO); 117 } 118 }; 119 120 class CallReturnHandler : public IncomingValueHandler { 121 public: 122 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI, 123 MachineInstrBuilder &MIB) 124 : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {} 125 126 private: 127 void markPhysRegUsed(unsigned PhysReg) override { 128 MIB.addDef(PhysReg, RegState::Implicit); 129 } 130 131 MachineInstrBuilder &MIB; 132 }; 133 134 } // end anonymous namespace 135 136 void IncomingValueHandler::assignValueToReg(Register ValVReg, 137 const CCValAssign &VA, 138 const EVT &VT) { 139 const MipsSubtarget &STI = 140 static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget()); 141 Register PhysReg = VA.getLocReg(); 142 if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) { 143 const MipsSubtarget &STI = 144 static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget()); 145 146 MIRBuilder 147 .buildInstr(STI.isFP64bit() ? Mips::BuildPairF64_64 148 : Mips::BuildPairF64) 149 .addDef(ValVReg) 150 .addUse(PhysReg + (STI.isLittle() ? 0 : 1)) 151 .addUse(PhysReg + (STI.isLittle() ? 
1 : 0)) 152 .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(), 153 *STI.getRegBankInfo()); 154 markPhysRegUsed(PhysReg); 155 markPhysRegUsed(PhysReg + 1); 156 } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) { 157 MIRBuilder.buildInstr(Mips::MTC1) 158 .addDef(ValVReg) 159 .addUse(PhysReg) 160 .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(), 161 *STI.getRegBankInfo()); 162 markPhysRegUsed(PhysReg); 163 } else { 164 switch (VA.getLocInfo()) { 165 case CCValAssign::LocInfo::SExt: 166 case CCValAssign::LocInfo::ZExt: 167 case CCValAssign::LocInfo::AExt: { 168 auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg); 169 MIRBuilder.buildTrunc(ValVReg, Copy); 170 break; 171 } 172 default: 173 MIRBuilder.buildCopy(ValVReg, PhysReg); 174 break; 175 } 176 markPhysRegUsed(PhysReg); 177 } 178 } 179 180 Register IncomingValueHandler::getStackAddress(const CCValAssign &VA, 181 MachineMemOperand *&MMO) { 182 MachineFunction &MF = MIRBuilder.getMF(); 183 unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; 184 unsigned Offset = VA.getLocMemOffset(); 185 MachineFrameInfo &MFI = MF.getFrameInfo(); 186 187 int FI = MFI.CreateFixedObject(Size, Offset, true); 188 MachinePointerInfo MPO = 189 MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); 190 191 const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering(); 192 unsigned Align = MinAlign(TFL->getStackAlignment(), Offset); 193 MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align); 194 195 return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0); 196 } 197 198 void IncomingValueHandler::assignValueToAddress(Register ValVReg, 199 const CCValAssign &VA) { 200 if (VA.getLocInfo() == CCValAssign::SExt || 201 VA.getLocInfo() == CCValAssign::ZExt || 202 VA.getLocInfo() == CCValAssign::AExt) { 203 auto Load = buildLoad(LLT::scalar(32), VA); 204 MIRBuilder.buildTrunc(ValVReg, Load); 205 } else 206 buildLoad(ValVReg, VA); 207 } 
/// Incoming split value: assign each part from its location, order the parts
/// least-significant-first, then merge them into the original wide register.
bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
/// Handler for values flowing out of the current function: call arguments and
/// the function's own return values. Physical register uses are attached to
/// MIB (the call or return instruction) as implicit operands.
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  /// Widen ValReg to the location type according to VA's extension kind.
  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  const MipsSubtarget &STI =
      static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());

  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    // An f64 passed in a GPR pair (A0-A3): extract the two 32-bit halves of
    // the double into consecutive GPRs. Which GPR receives the high half
    // (element index 1) depends on endianness.
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 1 : 0))
        .addUse(ValVReg)
        .addImm(1)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
    MIRBuilder
        .buildInstr(STI.isFP64bit() ? Mips::ExtractElementF64_64
                                    : Mips::ExtractElementF64)
        .addDef(PhysReg + (STI.isLittle() ? 0 : 1))
        .addUse(ValVReg)
        .addImm(0)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    // An f32 passed in a GPR: move it out of the FPR.
    MIRBuilder.buildInstr(Mips::MFC1)
        .addDef(PhysReg)
        .addUse(ValVReg)
        .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                          *STI.getRegBankInfo());
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  // Outgoing arguments are addressed relative to SP: SP + LocMemOffset.
  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  auto SPReg = MIRBuilder.buildCopy(p0, Register(Mips::SP));

  unsigned Offset = VA.getLocMemOffset();
  auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

  auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg.getReg(0);
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    return MIRBuilder.buildSExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::ZExt: {
    return MIRBuilder.buildZExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::AExt: {
    return MIRBuilder.buildAnyExt(LocTy, ValReg).getReg(0);
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

/// Outgoing split value: unmerge the wide register into parts, order the
/// parts to match the locations' significance order, and assign each part.
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

/// Argument types this GlobalISel lowering currently supports; anything else
/// makes lowerCall/lowerFormalArguments bail out (fall back to SelectionDAG).
static bool isSupportedArgumentType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

/// Return types this lowering supports; aggregates are additionally allowed
/// (they are split via splitToValueTypes in lowerReturn).
static bool isSupportedReturnType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  if (T->isAggregateType())
    return true;
  return false;
}

/// Recompute the LocInfo for a location from the argument flags: Full when no
/// widening happened, otherwise the extension kind the flags request.
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // > does not mean loss of information as type RegisterVT can't hold type VT,
  // it means that type VT is split into multiple registers of type RegisterVT
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

/// Rewrite each CCValAssign in ArgLocs with the LocInfo derived from the
/// parallel Arguments array (T is ISD::InputArg or ISD::OutputArg).
/// CCValAssign has no LocInfo setter, so each entry is rebuilt in place.
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

/// Lower a return: build a RetRA, route any returned values to the locations
/// chosen by the return calling convention, then insert the instruction.
/// Returns false (bail out of GlobalISel) on unsupported return types.
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedReturnType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VRegs, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    // Returned values are uses on the Ret instruction, handled as outgoing.
    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

/// Lower formal arguments: analyze them with the call calling convention,
/// copy/load each into its virtual register, and for vararg functions spill
/// the remaining unallocated argument registers to fixed stack slots so
/// va_start can find them.
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  // Reserve the ABI's callee-allocated argument area before assigning.
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  if (F.isVarArg()) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);

    int VaArgOffset;
    unsigned RegSize = 4;
    // If all argument registers were consumed, varargs start at the first
    // stack slot past the named arguments; otherwise they start inside the
    // register-save area, RegSize bytes per still-unallocated register.
    if (ArgRegs.size() == Idx)
      VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
    else {
      VaArgOffset =
          (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
          (int)(RegSize * (ArgRegs.size() - Idx));
    }

    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
    MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);

    // Spill each remaining argument register into its own fixed stack slot.
    for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
      MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);

      MachineInstrBuilder Copy =
          MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
      FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
      MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
      MachineInstrBuilder FrameIndex =
          MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
                                  /* Alignment */ RegSize);
      MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
    }
  }

  return true;
}

/// Lower a call: emit the ADJCALLSTACKDOWN/UP bracket, pick JAL or JALRPseudo
/// (register callee or PIC global), place arguments, insert the call, and
/// read back any returned values. Returns false to fall back on unsupported
/// conventions or argument kinds.
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedArgumentType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  // Stack-size operands are added once the CC analysis below knows them.
  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  // PIC calls to globals go indirect through a register loaded from the GOT.
  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    // Non-local symbols are resolved through the GOT call stub.
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  // Vararg-ness of the *callee* drives operand assignment, not the caller's.
  bool IsCalleeVarArg = false;
  if (Info.Callee.isGlobal()) {
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }
  MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  // External symbol name (if any) lets MipsCCState special-case libcalls.
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    // PIC ABI: $gp must hold the global base register across the call.
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

/// Expand each ArgInfo into the ISD::InputArg/OutputArg entries (T) that the
/// calling convention sees: one entry per register the value occupies, each
/// carrying the original argument's index and flags. Only the first part
/// keeps the ABI alignment; later parts get alignment 1.
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align(1));

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

/// Split an aggregate/composite argument into one ArgInfo per component EVT
/// (via ComputeValueVTs), recording OriginalIndex for each piece so later
/// passes can map parts back to the source argument.
void MipsCallLowering::splitToValueTypes(
    const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  SmallVector<EVT, 4> SplitEVTs;
  // NOTE(review): SplitVRegs is declared but never used in this function.
  SmallVector<Register, 4> SplitVRegs;
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
    Info.Flags = OrigArg.Flags;
    SplitArgs.push_back(Info);
    SplitArgsOrigIndices.push_back(OriginalIndex);
  }
}