//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
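  // Briefly: Promote re-expresses an operation in a wider type, Expand has
  // the legalizer rewrite the node in terms of other (legal) operations or a
  // libcall, and Custom routes the node to LowerOperation() below.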
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  setOperationAction(ISD::ADDC, XLenVT, Expand);
  setOperationAction(ISD::ADDE, XLenVT, Expand);
  setOperationAction(ISD::SUBC, XLenVT, Expand);
  setOperationAction(ISD::SUBE, XLenVT, Expand);

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2).
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
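// For example, SETGT never reaches this function: normaliseSetCC rewrites
// (setgt lhs, rhs) as (setlt rhs, lhs), which maps to BLT.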
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerGlobalAddress");

  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = N->getBlockAddress();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerBlockAddress");

  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
  const char *Sym = N->getSymbol();

  // TODO: should also handle gp-relative loads.
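  // As in lowerGlobalAddress above, the absolute address is materialised as
  // the two-instruction sequence: lui rd, %hi(sym) ; addi rd, rd, %lo(sym).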

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerExternalSymbol");

  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
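  // A va_list is just a pointer on RISC-V, so this single store suffices.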
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  unsigned FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  unsigned Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(MI.getOpcode() == RISCV::Select_GPR_Using_CC_GPR &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT instruction, we actually have to insert the triangle
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  // Move all remaining instructions to TailMBB.
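  // Note that MI itself is left in HeadMBB for now; it is only erased after
  // the PHI below has been built from its operands.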
  TailMBB->splice(TailMBB->begin(), HeadMBB,
                  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned LHS = MI.getOperand(1).getReg();
  unsigned RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(HeadMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return TailMBB;
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.

static const MCPhysReg ArgGPRs[] = {
    RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
    RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
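    // e.g. for an i64 on RV32 where only a7 remains free, the first (low)
    // half is assigned to a7 here and the second half falls through to the
    // handling below.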
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (unsigned Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
                     CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                     CCState &State, bool IsFixed, bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  assert(ValVT == XLenVT && "Unexpected ValVT");
  assert(LocVT == XLenVT && "Unexpected LocVT");

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Split arguments might be passed indirectly, so keep track of the pending
  // values.
  if (ArgFlags.isSplit() || !PendingLocs.empty()) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
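  // This is the common case, e.g. an i64 or a soft-float f64 on RV32.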
  if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  unsigned Reg = State.AllocateReg(ArgGPRs);
  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert(LocVT == XLenVT && "Expected an XLenVT at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    State.addLoc(
        CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  }
  return false;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
      DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI) const {
  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
                 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
      DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                   << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
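// (With Indirect, the register or stack slot holds the address of the value
// rather than the value itself; LowerFormalArguments performs the loads.)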
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;

  unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    return Val;
  }
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
                                 VA.getLocMemOffset(), /*Immutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
    SDValue ArgValue;
    if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address).
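      // All parts of a split argument share one OrigArgIndex, and each
      // Ins[i].PartOffset gives that part's byte offset from the common
      // address.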
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      assert(Ins[i].PartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument, which is needed
    // by VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes,
                                 true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const unsigned Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
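// Tail calls are not yet supported and are unconditionally disabled below
// (CLI.IsTailCall = false).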
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CLI.IsTailCall = false;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  // Create local copies for byval args.
  SmallVector<SDValue, 8> ByValArgs;
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (!Flags.isByVal())
      continue;

    SDValue Arg = OutVals[i];
    unsigned Size = Flags.getByValSize();
    unsigned Align = Flags.getByValAlign();

    int FI = MF.getFrameInfo().CreateStackObject(Size, Align, /*isSS=*/false);
    SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);

    Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Align,
                          /*IsVolatile=*/false,
                          /*AlwaysInline=*/false,
                          /*isTailCall=*/false, MachinePointerInfo(),
                          MachinePointerInfo());
    ByValArgs.push_back(FIPtr);
  }

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::Indirect: {
      // Store the argument in a stack slot and pass its address.
      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      // If the original argument was split (e.g. i128), we need
      // to store all parts of it here (and pass just one address).
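      // This mirrors the part-by-part load loop used for Indirect arguments
      // in LowerFormalArguments.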
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      assert(Outs[i].PartOffset == 0);
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
        ++i;
      }
      ArgValue = SpillSlot;
      break;
    }
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  if (isa<GlobalAddressSDNode>(Callee)) {
    Callee = lowerGlobalAddress(Callee, DAG);
  } else if (isa<ExternalSymbolSDNode>(Callee)) {
    Callee = lowerExternalSymbol(Callee, DAG);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
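  // The glue operand keeps each CopyFromReg attached to the end of the call
  // sequence, so nothing can be scheduled in between.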
  for (auto &VA : RVLocs) {
    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    assert(VA.getLocInfo() == CCValAssign::Full && "Unknown loc info!");
    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    assert(VA.getLocInfo() == CCValAssign::Full &&
           "Unexpected CCValAssign::LocInfo");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}