//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(unsigned VReg,
                                           const CCValAssign &VA) {
  if (VA.isRegLoc()) {
    assignValueToReg(VReg, VA);
  } else if (VA.isMemLoc()) {
    assignValueToAddress(VReg, VA);
  } else {
    return false;
  }
  return true;
}

bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
                                                ArrayRef<CCValAssign> ArgLocs,
                                                unsigned ArgLocsStartIndex) {
  for (unsigned i = 0; i < VRegs.size(); ++i)
    if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i]))
      return false;
  return true;
}

void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
    SmallVectorImpl<unsigned> &VRegs) {
  if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
    std::reverse(VRegs.begin(), VRegs.end());
}

bool MipsCallLowering::MipsHandler::handle(
    ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
  SmallVector<unsigned, 4> VRegs;
  unsigned SplitLength;
  const Function &F = MIRBuilder.getMF().getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>(
      MIRBuilder.getMF().getSubtarget().getTargetLowering());

  for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size();
       ++ArgsIndex, ArgLocsIndex += SplitLength) {
    EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty);
    SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(),
                                                    F.getCallingConv(), VT);
    if (SplitLength > 1) {
      VRegs.clear();
      MVT RegisterVT = TLI.getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i < SplitLength; ++i)
        VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT}));

      if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg))
        return false;
    } else {
      if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex]))
        return false;
    }
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
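    // Incoming values arrive in physical registers; mark each such register
    // as live-in to the current block so the copy emitted below is valid.
    // CallReturnHandler overrides this to record an implicit def on the call
    // instruction instead.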
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, const CCValAssign &VA) {
    MachineMemOperand *MMO;
    unsigned Addr = getStackAddress(VA, MMO);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  switch (VA.getLocInfo()) {
  case CCValAssign::LocInfo::SExt:
  case CCValAssign::LocInfo::ZExt:
  case CCValAssign::LocInfo::AExt: {
    auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
    MIRBuilder.buildTrunc(ValVReg, Copy);
    break;
  }
  default:
    MIRBuilder.buildCopy(ValVReg, PhysReg);
    break;
  }
  markPhysRegUsed(PhysReg);
}

unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  unsigned Offset = VA.getLocMemOffset();
  MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MachinePointerInfo MPO =
      MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
  MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOLoad,
                                                Size, /* Alignment */ 0);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  if (VA.getLocInfo() == CCValAssign::SExt ||
      VA.getLocInfo() == CCValAssign::ZExt ||
      VA.getLocInfo() == CCValAssign::AExt) {
    unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
    buildLoad(LoadReg, VA);
    MIRBuilder.buildTrunc(ValVReg, LoadReg);
  } else
    buildLoad(ValVReg, VA);
}

bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;
  setLeastSignificantFirst(VRegs);
  MIRBuilder.buildMerge(ArgsReg, VRegs);
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override;

  unsigned getStackAddress(const CCValAssign &VA,
                           MachineMemOperand *&MMO) override;

  void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;

  bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
                   ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
                   unsigned ArgsReg) override;

  unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

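// Copy the outgoing value, extended to its location type when required, into
// the physical register chosen by the calling convention and mark that
// register as an implicit use of the call or return instruction.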
void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            const CCValAssign &VA) {
  unsigned PhysReg = VA.getLocReg();
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildCopy(PhysReg, ExtReg);
  MIB.addUse(PhysReg, RegState::Implicit);
}

unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA,
                                               MachineMemOperand *&MMO) {
  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  unsigned Offset = VA.getLocMemOffset();
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MachinePointerInfo MPO =
      MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
  MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOStore,
                                                Size, /* Alignment */ 0);

  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
                                                const CCValAssign &VA) {
  MachineMemOperand *MMO;
  unsigned Addr = getStackAddress(VA, MMO);
  unsigned ExtReg = extendRegister(ValVReg, VA);
  MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}

unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
                                              const CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  case CCValAssign::SExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::ZExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(ExtReg, ValReg);
    return ExtReg;
  }
  case CCValAssign::AExt: {
    unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildAnyExt(ExtReg, ValReg);
    return ExtReg;
  }
  // TODO: Handle upper extends.
  case CCValAssign::Full:
    return ValReg;
  default:
    break;
  }
  llvm_unreachable("unable to extend register");
}

bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
                                       ArrayRef<CCValAssign> ArgLocs,
                                       unsigned ArgLocsStartIndex,
                                       unsigned ArgsReg) {
  MIRBuilder.buildUnmerge(VRegs, ArgsReg);
  setLeastSignificantFirst(VRegs);
  if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex))
    return false;

  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy())
    return true;
  if (T->isPointerTy())
    return true;
  return false;
}

static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT,
                                             const ISD::ArgFlagsTy &Flags) {
  // VT being wider than RegisterVT does not mean information is lost because
  // RegisterVT cannot hold VT; it means that VT is split into multiple
  // registers of type RegisterVT.
  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
    return CCValAssign::LocInfo::Full;
  if (Flags.isSExt())
    return CCValAssign::LocInfo::SExt;
  if (Flags.isZExt())
    return CCValAssign::LocInfo::ZExt;
  return CCValAssign::LocInfo::AExt;
}

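// Rewrite the LocInfo of each location assigned by the calling-convention
// analysis using the corresponding ISD argument's types and flags, keeping
// the register or stack slot that was already chosen.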
template <typename T>
static void setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs,
                       const SmallVectorImpl<T> &Arguments) {
  for (unsigned i = 0; i < ArgLocs.size(); ++i) {
    const CCValAssign &VA = ArgLocs[i];
    CCValAssign::LocInfo LocInfo = determineLocInfo(
        Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags);
    if (VA.isMemLoc())
      ArgLocs[i] =
          CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
                              VA.getLocMemOffset(), VA.getLocVT(), LocInfo);
    else
      ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
                                       VA.getLocReg(), VA.getLocVT(), LocInfo);
  }
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val,
                                   ArrayRef<unsigned> VRegs) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr && !isSupportedType(Val->getType()))
    return false;

  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices);
    }

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs);

    SmallVector<CCValAssign, 16> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());
    setLocInfo(ArgLocs, Outs);

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

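// Lower incoming formal arguments: split each IR argument according to the
// calling convention, analyze the resulting locations with MipsCCState, and
// let IncomingValueHandler emit the copies and loads into the given VRegs.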
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins);

  SmallVector<CCValAssign, 16> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
  setLocInfo(ArgLocs, Ins);

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  // FIXME: Add support for pic calling sequences, long call sequences for O32,
  // N32 and N64. First handle the case when Callee.isReg().
  if (Callee.isReg())
    return false;

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs);

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
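  // Forward the callee's symbol name (when available) to MipsCCState, which
  // can use it to special-case certain known library calls.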
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
  setLocInfo(ArgLocs, Outs);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins);

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);
    setLocInfo(ArgLocs, Ins);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

template <typename T>
void MipsCallLowering::subTargetRegTypeForCallingConv(
    const Function &F, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const {
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(),
                                                       F.getCallingConv(), VT);
    unsigned NumRegs = TLI.getNumRegistersForCallingConv(
        F.getContext(), F.getCallingConv(), VT);

    for (unsigned i = 0; i < NumRegs; ++i) {
      ISD::ArgFlagsTy Flags = Arg.Flags;

      if (i == 0)
        Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));
      else
        Flags.setOrigAlign(1);

      ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo],
                           0);
    }
    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO: Perform structure and array splits. For now we only handle types
  // that pass the isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}