//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
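  // Operations the base RV32I instruction set cannot express directly are
  // marked Expand so the legalizer rewrites them into supported sequences or
  // libcalls. Integer multiply and divide live in the M standard extension,
  // which is not handled yet, so those nodes are expanded as well. SELECT is
  // custom lowered so it can reuse the integer compare-and-branch
  // instructions.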
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  setOperationAction(ISD::ADDC, XLenVT, Expand);
  setOperationAction(ISD::ADDE, XLenVT, Expand);
  setOperationAction(ISD::SUBC, XLenVT, Expand);
  setOperationAction(ISD::SUBE, XLenVT, Expand);

  setOperationAction(ISD::SREM, XLenVT, Expand);
  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SDIV, XLenVT, Expand);
  setOperationAction(ISD::UREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIV, XLenVT, Expand);

  setOperationAction(ISD::MUL, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::MULHS, XLenVT, Expand);
  setOperationAction(ISD::MULHU, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Expand);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Expand);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments (log2).
  setMinFunctionAlignment(3);
  setPrefFunctionAlignment(3);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = N->getGlobal();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerGlobalAddress");

  SDValue GAHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
  SDValue GALo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  const BlockAddress *BA = N->getBlockAddress();
  int64_t Offset = N->getOffset();

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerBlockAddress");

  SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
  SDValue BALo = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, BAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, BALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
  const char *Sym = N->getSymbol();

  // TODO: should also handle gp-relative loads.
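  // As with global and block addresses above, the absolute address is
  // materialised as a %hi/%lo pair: LUI sets the upper 20 bits and ADDI adds
  // the low 12 bits, using the RISCVII::MO_HI/MO_LO operand flags.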

  if (isPositionIndependent() || Subtarget.is64Bit())
    report_fatal_error("Unable to lowerExternalSymbol");

  SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
  SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
  SDValue MNLo =
      SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
  return MNLo;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(MI.getOpcode() == RISCV::Select_GPR_Using_CC_GPR &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT instruction, we actually have to insert the triangle
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);
  // Move all remaining instructions to TailMBB.
  TailMBB->splice(TailMBB->begin(), HeadMBB,
                  std::next(MachineBasicBlock::iterator(MI)), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned LHS = MI.getOperand(1).getReg();
  unsigned RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
  BuildMI(*TailMBB, TailMBB->begin(), DL, TII.get(RISCV::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(4).getReg())
      .addMBB(HeadMBB)
      .addReg(MI.getOperand(5).getReg())
      .addMBB(IfFalseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return TailMBB;
}

// Calling Convention Implementation.
#include "RISCVGenCallingConv.inc"

// Transform physical registers into virtual registers.
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  MVT XLenVT = Subtarget.getXLenVT();

  if (IsVarArg)
    report_fatal_error("VarArg not supported");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV32);

  for (auto &VA : ArgLocs) {
    if (!VA.isRegLoc())
      report_fatal_error("Defined with too many args");

    // Arguments passed in registers.
    EVT RegVT = VA.getLocVT();
    if (RegVT != XLenVT) {
      DEBUG(dbgs() << "LowerFormalArguments Unhandled argument type: "
                   << RegVT.getEVTString() << "\n");
      report_fatal_error("unhandled argument type");
    }
    const unsigned VReg =
        RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
    RegInfo.addLiveIn(VA.getLocReg(), VReg);
    SDValue ArgIn = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

    InVals.push_back(ArgIn);
  }
  return Chain;
}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CLI.IsTailCall = false;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (IsVarArg) {
    report_fatal_error("LowerCall with varargs not implemented");
  }

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV32);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = ArgCCInfo.getNextStackOffset();

  for (auto &Arg : Outs) {
    if (!Arg.Flags.isByVal())
      continue;
    report_fatal_error("Passing arguments byval not yet implemented");
  }

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  // Copy argument values to their designated locations.
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];

    // Promote the value if needed.
    // For now, only handle fully promoted arguments.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      report_fatal_error("Passing arguments via the stack not yet implemented");
    }
  }

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  if (isa<GlobalAddressSDNode>(Callee)) {
    Callee = lowerGlobalAddress(Callee, DAG);
  } else if (isa<ExternalSymbolSDNode>(Callee)) {
    Callee = lowerExternalSymbol(Callee, DAG);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_RISCV32);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out, gluing the copy to the end of the call sequence.
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
                                          VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  if (IsVarArg) {
    report_fatal_error("VarArg not supported");
  }

  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeReturn(Outs, RetCC_RISCV32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  }
  return nullptr;
}