//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

/// Dispatch one virtual register / location pair to the register or the
/// stack-memory path, depending on where the calling convention assigned it.
/// Returns false for location kinds that are neither reg nor mem.
bool MipsCallLowering::MipsHandler::assign(unsigned VReg,
                                           const CCValAssign &VA) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

/// Assign a contiguous run of virtual registers to the argument locations
/// starting at ArgLocsStartIndex, one location per vreg.
bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i]))
      return false;
  return true;
}

/// Reorder split-value registers so the least significant part comes first.
/// ArgLocs are allocated most-significant-first on big-endian targets, so on
/// big-endian the vector is reversed; on little-endian it is left as-is.
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

/// Walk Args in parallel with ArgLocs. A single IR-level argument may occupy
/// several locations (when the target splits its type across multiple
/// registers); ArgLocsIndex advances by the split length each iteration so the
/// two sequences stay in sync.
bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  // Assigned in the loop body before the increment expression reads it.
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      // The value is split: create one generic vreg per part and let the
      // concrete handler merge/unmerge them against Args[ArgsIndex].Reg.
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex]))
        return false;
    }
  }
  return true;
}

namespace {
/// Handler for values flowing *into* the current function: formal arguments
/// and (via the CallReturnHandler subclass) values returned by a callee.
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  /// Record that PhysReg carries a live value into this code. For formal
  /// arguments that means marking it live-in on the entry block; the call
  /// return handler overrides this to add an implicit def on the call instead.
  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  /// Load Val from the stack slot described by VA.
  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

/// Incoming handler for values returned by a callee: physical return registers
/// are attached as implicit defs of the call instruction rather than block
/// live-ins.
class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  // The call instruction being built; defs are appended to it.
  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

/// Copy an incoming physical register into ValVReg. Extended values arrive in
/// the (wider) location type, so they are copied at LocVT width first and then
/// truncated down to the value type.
void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::SExt:
  case CCValAssign::LocInfo::ZExt:
  case CCValAssign::LocInfo::AExt: {
    auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
    MIRBuilder.buildTrunc(ValVReg, Copy);
    break;
  }
  default:
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    break;
  }
  markPhysRegUsed(PhysReg);
}

/// Create a fixed stack object at the incoming argument's offset and return a
/// pointer vreg addressing it; MMO is filled in with a matching load operand.
unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  // Byte size of the value, rounded up to whole bytes.
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

/// Load an incoming stack-passed value into ValVReg. Extended values are
/// loaded at 32 bits and truncated down to the value type.
void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

/// Incoming split value: fill the part vregs from their locations, then merge
/// them (least significant part first) into the single wide ArgsReg.
bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
/// Handler for values flowing *out of* the current function: return values
/// and outgoing call arguments. MIB is the return/call instruction being
/// built; used physical registers are attached to it as implicit uses.
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  /// Extend ValReg to the location type as VA's LocInfo dictates.
  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

/// Extend (if required) and copy ValVReg into its assigned physical register,
/// marking that register as an implicit use of the call/return instruction.
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildCopy(PhysReg, ExtReg);
  MIB.addUse(PhysReg, RegState::Implicit);
}

/// Compute SP + offset for an outgoing stack-passed value and return the
/// address vreg; MMO is filled in with a matching store operand.
unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  MachineFunction &MF = MIRBuilder.getMF();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();

  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);

  return AddrReg;
}

/// Extend (if required) and store ValVReg to its assigned outgoing stack slot.
void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

/// Produce a vreg of the location type holding ValReg extended per LocInfo.
/// Full passes the register through unchanged; unsupported kinds are
/// unreachable.
unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO : handle upper extends
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

/// Outgoing split value: unmerge the wide ArgsReg into part vregs, reorder
/// them to match location order, and assign each part to its location.
bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;

  return true;
}
/// Types this lowering can handle so far; anything else makes the caller
/// fall back (return false) to SelectionDAG.
static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  return false;
}

/// Decide how VT relates to the register type it is assigned to: Full when it
/// fits (or is split), otherwise the extension kind requested by the argument
/// flags (AExt when neither sext nor zext was requested).
static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // > does not mean loss of information as type RegisterVT can't hold type VT,
  // it means that type VT is split into multiple registers of type RegisterVT
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

/// Rebuild each CCValAssign in ArgLocs with the LocInfo recomputed from the
/// parallel ISD argument list (T is ISD::InputArg or ISD::OutputArg). The
/// other fields of each assignment are preserved.
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

/// Lower a return: analyze the return value locations, copy the value(s) into
/// the return registers via OutgoingValueHandler, then emit RetRA. Returns
/// false (fall back) for unsupported return types.
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  // Built first but inserted last, so the value copies precede the return.
  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

/// Lower formal arguments: analyze their locations and materialize each
/// argument into its vreg via IncomingValueHandler. Returns false (fall back)
/// for vararg functions or unsupported argument types.
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  // Reserve the ABI-mandated callee-allocated argument area before assigning
  // locations.
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

/// Lower a call: emit the ADJCALLSTACKDOWN/UP pair, copy arguments to their
/// locations, emit JAL, and copy any return value back. Returns false (fall
/// back) for non-C calling conventions, byval/sret arguments, unsupported
/// types, or register-indirect callees.
bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  // Stack-adjustment size is filled in below once the arg area is known.
  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  // FIXME: Add support for pic calling sequences, long call sequences for O32,
  // N32 and N64. First handle the case when Callee.isReg().
  if (Callee.isReg())
    return false;

  // Build the call without inserting it, so argument copies/stores emitted by
  // the handler come before it; inserted after handling succeeds.
  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);

  if (OrigRet.Reg) {

    // Analyze and copy back the call's return value(s).
    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

/// Expand each ArgInfo into one ISD arg (T is ISD::InputArg or
/// ISD::OutputArg) per calling-convention register it occupies. Only the
/// first part of a split argument keeps the original ABI alignment.
template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

/// Record OrigArg (and its original index) as a single split part. Aggregate
/// splitting is not implemented yet; isSupportedType keeps aggregates out.
void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO : perform structure and array split. For now we only deal with
  // types that pass isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}