//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsMachineFunction.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
                                           const EVT &VT) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA, VT);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex,
                                                const EVT &VT) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i], VT))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<Register> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<Register, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    assert(Args[ArgsIndex].Regs.size() == 1 &&
           "Can't handle multiple regs yet");

    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Regs[0],
                       VT))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Regs[0], ArgLocs[ArgLocsIndex], VT))
        return false;
    }
  }
  return true;
}

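// Incoming-value handling (formal arguments and call results): values arriving
// in physical registers or in the caller's stack area are copied or loaded
// into virtual registers. IncomingValueHandler marks the physical registers
// live-in to the function; CallReturnHandler instead records them as implicit
// defs on the call instruction.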
namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  MachineInstrBuilder buildLoad(const DstOp &Res, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    Register Addr = getStackAddress(VA, MMO);
    return MIRBuilder.buildLoad(Res, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

void IncomingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    bool IsEL = STI.isLittle();
    LLT s32 = LLT::scalar(32);
    auto Lo = MIRBuilder.buildCopy(s32, Register(PhysReg + (IsEL ? 0 : 1)));
    auto Hi = MIRBuilder.buildCopy(s32, Register(PhysReg + (IsEL ? 1 : 0)));
    MIRBuilder.buildMerge(ValVReg, {Lo, Hi});
    markPhysRegUsed(PhysReg);
    markPhysRegUsed(PhysReg + 1);
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    markPhysRegUsed(PhysReg);
  } else {
    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
    markPhysRegUsed(PhysReg);
  }
}

Register IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  Align Alignment = commonAlignment(TFL->getStackAlign(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size,
                                Alignment.value());

  return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0);
}

void IncomingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    auto Load = buildLoad(LLT::scalar(32), VA);
    MIRBuilder.buildTrunc(ValVReg, Load);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

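// Outgoing-value handling (return values and call arguments): virtual
// registers are copied into the physical registers chosen by the calling
// convention or stored into the outgoing argument area relative to $sp. The
// return or call instruction is available through MIB so assigned registers
// can be attached as implicit operands.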
namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(Register ValVReg, const CCValAssign &VA,
                        const EVT &VT) override;

  Register getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<Register> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   Register ArgsReg, const EVT &VT) override;

  Register extendRegister(Register ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void OutgoingValueHandler::assignValueToReg(Register ValVReg,
                                            const CCValAssign &VA,
                                            const EVT &VT) {
  Register PhysReg = VA.getLocReg();
  if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    bool IsEL = STI.isLittle();
    auto Unmerge = MIRBuilder.buildUnmerge(LLT::scalar(32), ValVReg);
    MIRBuilder.buildCopy(Register(PhysReg + (IsEL ? 0 : 1)), Unmerge.getReg(0));
    MIRBuilder.buildCopy(Register(PhysReg + (IsEL ? 1 : 0)), Unmerge.getReg(1));
  } else if (VT == MVT::f32 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) {
    MIRBuilder.buildCopy(PhysReg, ValVReg);
  } else {
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }
}

Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  auto SPReg = MIRBuilder.buildCopy(p0, Register(Mips::SP));

  unsigned Offset = VA.getLocMemOffset();
  auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

  auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg.getReg(0);
}

void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  Register Addr = getStackAddress(VA, MMO);
  Register ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

Register OutgoingValueHandler::extendRegister(Register ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    return MIRBuilder.buildSExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::ZExt: {
    return MIRBuilder.buildZExt(LocTy, ValReg).getReg(0);
  }
  case CCValAssign::AExt: {
    return MIRBuilder.buildAnyExt(LocTy, ValReg).getReg(0);
  }
  // TODO: handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       Register ArgsReg, const EVT &VT) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
    return false;

  return true;
}

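// Only a subset of IR types is handled so far; for anything else the lowering
// returns false so GlobalISel can fall back. Aggregates are accepted as return
// types only, where they are broken up by splitToValueTypes below.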
static bool isSupportedArgumentType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  return false;
}

static bool isSupportedReturnType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  if (T->isFloatingPointTy())
    return true;
  if (T->isAggregateType())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // When VT is at least as wide as RegisterVT, the size difference does not
  // mean information is lost because RegisterVT cannot hold VT; it means VT is
  // split into multiple registers of type RegisterVT, so no extension applies.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<Register> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedReturnType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VRegs, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(DL, ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

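// Formal arguments are analyzed with the MIPS calling convention, incoming
// values are materialized by IncomingValueHandler, and, for vararg functions,
// the remaining variadic argument registers are spilled to fixed stack slots
// so va_arg can find them in memory.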
bool MipsCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  for (auto &Arg : F.args()) {
    if (!isSupportedArgumentType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    ArgInfos.push_back(AInfo);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  if (F.isVarArg()) {
    ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);

    int VaArgOffset;
    unsigned RegSize = 4;
    if (ArgRegs.size() == Idx)
      VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
    else {
      VaArgOffset =
          (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
          (int)(RegSize * (ArgRegs.size() - Idx));
    }

    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
    MF.getInfo<MipsFunctionInfo>()->setVarArgsFrameIndex(FI);

    for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += RegSize) {
      MIRBuilder.getMBB().addLiveIn(ArgRegs[I]);

      MachineInstrBuilder Copy =
          MIRBuilder.buildCopy(LLT::scalar(RegSize * 8), Register(ArgRegs[I]));
      FI = MFI.CreateFixedObject(RegSize, VaArgOffset, true);
      MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
      MachineInstrBuilder FrameIndex =
          MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
      MachineMemOperand *MMO =
          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
                                  /* Alignment */ RegSize);
      MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
    }
  }

  return true;
}

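// Calls are lowered as an ADJCALLSTACKDOWN/ADJCALLSTACKUP pair around the call
// instruction: JAL for direct non-PIC calls, JALRPseudo for register calls and
// for global callees in PIC mode, where the callee address is loaded through
// the GOT (MO_GOT_CALL) and $gp is set up from the global base register.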
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallLoweringInfo &Info) const {

  if (Info.CallConv != CallingConv::C)
    return false;

  for (auto &Arg : Info.OrigArgs) {
    if (!isSupportedArgumentType(Arg.Ty))
      return false;
    if (Arg.Flags[0].isByVal())
      return false;
    if (Arg.Flags[0].isSRet() && !Arg.Ty->isPointerTy())
      return false;
  }

  if (!Info.OrigRet.Ty->isVoidTy() && !isSupportedReturnType(Info.OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  const bool IsCalleeGlobalPIC =
      Info.Callee.isGlobal() && TM.isPositionIndependent();

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(
      Info.Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  if (IsCalleeGlobalPIC) {
    Register CalleeReg =
        MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32));
    MachineInstr *CalleeGlobalValue =
        MIRBuilder.buildGlobalValue(CalleeReg, Info.Callee.getGlobal());
    if (!Info.Callee.getGlobal()->hasLocalLinkage())
      CalleeGlobalValue->getOperand(1).setTargetFlags(MipsII::MO_GOT_CALL);
    MIB.addUse(CalleeReg);
  } else
    MIB.add(Info.Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(Info.OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : Info.OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    ArgInfos.push_back(Arg);
    OrigArgIndices.push_back(i);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  bool IsCalleeVarArg = false;
  if (Info.Callee.isGlobal()) {
    const Function *CF = static_cast<const Function *>(Info.Callee.getGlobal());
    IsCalleeVarArg = CF->isVarArg();
  }
  MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
  const char *Call =
      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  if (IsCalleeGlobalPIC) {
    MIRBuilder.buildCopy(
        Register(Mips::GP),
        MF.getInfo<MipsFunctionInfo>()->getGlobalBaseRegForGlobalISel());
    MIB.addDef(Mips::GP, RegState::Implicit);
  }
  MIRBuilder.insertInstr(MIB);
  if (MIB->getOpcode() == Mips::JALRPseudo) {
    const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
    MIB.constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(),
                         *STI.getRegBankInfo());
  }

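  // Lower the call result, if any: analyze the return value locations for this
  // call and let CallReturnHandler copy the result registers (added as
  // implicit defs on the call) into the destination virtual registers.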
  if (!Info.OrigRet.Ty->isVoidTy()) {
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(DL, Info.OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), Info.OrigRet.Ty,
                             Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags[0];

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(Align(1));

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

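// splitToValueTypes breaks an original argument or return value into one
// ArgInfo per value type reported by ComputeValueVTs, remembering the original
// argument index for each piece so its flags and locations can be mapped back.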
void MipsCallLowering::splitToValueTypes(
    const DataLayout &DL, const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  SmallVector<EVT, 4> SplitEVTs;
  SmallVector<Register, 4> SplitVRegs;
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitEVTs);

  for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
    ArgInfo Info = ArgInfo{OrigArg.Regs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
    Info.Flags = OrigArg.Flags;
    SplitArgs.push_back(Info);
    SplitArgsOrigIndices.push_back(OriginalIndex);
  }
}