//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

  ISD::CondCode FPCCToExtend[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE};

  ISD::NodeType FPOpToExtend[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM,
      ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f32, Expand);
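    // There is no hardware support for f16: extending loads and truncating
    // stores are expanded, ultimately reaching the conversion libcalls.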
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  // Effectively disable jump table generation.
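  // Requiring INT_MAX entries means lowering will never consider a jump
  // table profitable, so branch sequences are used instead.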
  setMinimumJumpTableEntries(INT_MAX);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
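  // LBU and LHU (and LWU on RV64) zero-extend as part of the load itself,
  // so no separate extension instruction is needed.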
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
      return SDValue();
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
    return FPConv;
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2GiB range within
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
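  // __tls_get_addr takes the address of the GOT entries computed above and
  // returns the thread-local address of the symbol.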
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions, i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
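  // va_list is a plain pointer in the RISC-V psABI, so a single store
  // suffices.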
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
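  // The return address is in X1 (ra) on function entry.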
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  }
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on because the fact that the operation was originally of
// type i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics, reducing the number of sign extension instructions needed.
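// For example, (i32 (add x, y)) becomes
// (trunc (sext_inreg (add (anyext x), (anyext y)), i32)), and the
// sext_inreg of the add is what the ADDW selection pattern matches.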
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtF() && "Unexpected custom legalisation");
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    if (Op0.getValueType() != MVT::f32)
      return;
    SDValue FPConv =
        DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    break;
  }
  }
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
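    // For example, splitting double 1.0 (0x3FF0000000000000) needs only a
    // single LUI for the high word and x0 (zero) for the low word.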
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
      return SDValue();
    break;
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
    if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
      SDValue AExtOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
      return DCI.CombineTo(N, AExtOp);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
                                 Op0.getOperand(0));
    APInt SignBit = APInt::getSignMask(32).sext(64);
    if (Op0.getOpcode() == ISD::FNEG) {
      return DCI.CombineTo(N,
                           DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
                                       DAG.getConstant(SignBit, DL, MVT::i64)));
    }
    assert(Op0.getOpcode() == ISD::FABS);
    return DCI.CombineTo(N,
                         DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
                                     DAG.getConstant(~SignBit, DL, MVT::i64)));
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      APInt C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.is64Bit());
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount.
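    // The result is sign-extended from bit 31, so bits 63 to 31 of the i64
    // result all equal the sign bit, giving 33 known sign bits.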
    return 33;
  }

  return 1;
}

static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again.
  // ...
  // read:
  //   rdcycleh x3 # load high word of cycle
  //   rdcycle  x2 # load low word of cycle
  //   rdcycleh x4 # load high word of cycle
  //   bne x3, x4, read # check if high word reads match, otherwise try again
  // ...

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}

static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI);
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOLoad, 8, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex();

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOStore, 8, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return true;
  }
}

static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
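  // The Select_* pseudos have operands (dst, lhs, rhs, cc, truev, falsev).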
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;

  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    else if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    } else {
      if (SequenceMBBI->hasUnmodeledSideEffects() ||
          SequenceMBBI->mayLoadOrStore())
        break;
      if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
            return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
          }))
        break;
    }
  }

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  }
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation is
//   passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.
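//
// For example, on RV32 an i64 argument (exactly 2*XLEN bits) may end up in a
// GPR pair, split between the last argument GPR and the stack, or entirely on
// the stack; CC_RISCVAssign2XLen below handles all three cases.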

static const MCPhysReg ArgGPRs[] = {
    RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
    RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};
static const MCPhysReg ArgFPR32s[] = {
    RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
    RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
};
static const MCPhysReg ArgFPR64s[] = {
    RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
    RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
    return false;
  }

  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
        CCValAssign::Full));
  }

  return false;
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // Any return value split into more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F32 argument registers are available.
  bool UseGPRForF32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
1505 bool UseGPRForF64 = true;
1506 
1507 switch (ABI) {
1508 default:
1509 llvm_unreachable("Unexpected ABI");
1510 case RISCVABI::ABI_ILP32:
1511 case RISCVABI::ABI_LP64:
1512 break;
1513 case RISCVABI::ABI_ILP32F:
1514 case RISCVABI::ABI_LP64F:
1515 UseGPRForF32 = !IsFixed;
1516 break;
1517 case RISCVABI::ABI_ILP32D:
1518 case RISCVABI::ABI_LP64D:
1519 UseGPRForF32 = !IsFixed;
1520 UseGPRForF64 = !IsFixed;
1521 break;
1522 }
1523 
1524 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1525 UseGPRForF32 = true;
1526 if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1527 UseGPRForF64 = true;
1528 
1529 // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1530 // variables rather than directly checking against the target ABI.
1531 
1532 if (UseGPRForF32 && ValVT == MVT::f32) {
1533 LocVT = XLenVT;
1534 LocInfo = CCValAssign::BCvt;
1535 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1536 LocVT = MVT::i64;
1537 LocInfo = CCValAssign::BCvt;
1538 }
1539 
1540 // If this is a variadic argument, the RISC-V calling convention requires
1541 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1542 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1543 // be used regardless of whether the original argument was split during
1544 // legalisation or not. The argument will not be passed by registers if the
1545 // original type is larger than 2*XLEN, so the register alignment rule does
1546 // not apply.
1547 unsigned TwoXLenInBytes = (2 * XLen) / 8;
1548 if (!IsFixed && ArgFlags.getOrigAlign() == TwoXLenInBytes &&
1549 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1550 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1551 // Skip 'odd' register if necessary.
1552 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1553 State.AllocateReg(ArgGPRs);
1554 }
1555 
1556 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1557 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1558 State.getPendingArgFlags();
1559 
1560 assert(PendingLocs.size() == PendingArgFlags.size() &&
1561 "PendingLocs and PendingArgFlags out of sync");
1562 
1563 // Handle passing f64 on RV32D with a soft float ABI or when floating point
1564 // registers are exhausted.
1565 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
1566 assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1567 "Can't lower f64 if it is split");
1568 // Depending on available argument GPRs, f64 may be passed in a pair of
1569 // GPRs, split between a GPR and the stack, or passed completely on the
1570 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1571 // cases.
1572 Register Reg = State.AllocateReg(ArgGPRs);
1573 LocVT = MVT::i32;
1574 if (!Reg) {
1575 unsigned StackOffset = State.AllocateStack(8, 8);
1576 State.addLoc(
1577 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1578 return false;
1579 }
1580 if (!State.AllocateReg(ArgGPRs))
1581 State.AllocateStack(4, 4);
1582 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1583 return false;
1584 }
1585 
1586 // Split arguments might be passed indirectly, so keep track of the pending
1587 // values.
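// For example (an illustrative note, not from the upstream source): an i128
// argument on RV32 is legalised into four i32 parts. All but the last part
// are merely recorded as pending below; when the part flagged SplitEnd
// arrives, the whole group is assigned together -- directly via
// CC_RISCVAssign2XLen when there are exactly two parts, or indirectly
// through a single pointer when there are more.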
1588 if (ArgFlags.isSplit() || !PendingLocs.empty()) { 1589 LocVT = XLenVT; 1590 LocInfo = CCValAssign::Indirect; 1591 PendingLocs.push_back( 1592 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 1593 PendingArgFlags.push_back(ArgFlags); 1594 if (!ArgFlags.isSplitEnd()) { 1595 return false; 1596 } 1597 } 1598 1599 // If the split argument only had two elements, it should be passed directly 1600 // in registers or on the stack. 1601 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 1602 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 1603 // Apply the normal calling convention rules to the first half of the 1604 // split argument. 1605 CCValAssign VA = PendingLocs[0]; 1606 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 1607 PendingLocs.clear(); 1608 PendingArgFlags.clear(); 1609 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 1610 ArgFlags); 1611 } 1612 1613 // Allocate to a register if possible, or else a stack slot. 1614 Register Reg; 1615 if (ValVT == MVT::f32 && !UseGPRForF32) 1616 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s); 1617 else if (ValVT == MVT::f64 && !UseGPRForF64) 1618 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s); 1619 else 1620 Reg = State.AllocateReg(ArgGPRs); 1621 unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8); 1622 1623 // If we reach this point and PendingLocs is non-empty, we must be at the 1624 // end of a split argument that must be passed indirectly. 1625 if (!PendingLocs.empty()) { 1626 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 1627 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 1628 1629 for (auto &It : PendingLocs) { 1630 if (Reg) 1631 It.convertToReg(Reg); 1632 else 1633 It.convertToMem(StackOffset); 1634 State.addLoc(It); 1635 } 1636 PendingLocs.clear(); 1637 PendingArgFlags.clear(); 1638 return false; 1639 } 1640 1641 assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) && 1642 "Expected an XLenVT at this stage"); 1643 1644 if (Reg) { 1645 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 1646 return false; 1647 } 1648 1649 // When an f32 or f64 is passed on the stack, no bit-conversion is needed. 
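// (Illustrative note: under lp64, an f32 argument that overflows the
// argument GPRs reaches this point carrying a bit-cast i64 LocVT; since it
// is stored to the stack in its native representation, LocVT reverts to the
// value type here.)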
1650 if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1651 LocVT = ValVT;
1652 LocInfo = CCValAssign::Full;
1653 }
1654 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1655 return false;
1656 }
1657 
1658 void RISCVTargetLowering::analyzeInputArgs(
1659 MachineFunction &MF, CCState &CCInfo,
1660 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1661 unsigned NumArgs = Ins.size();
1662 FunctionType *FType = MF.getFunction().getFunctionType();
1663 
1664 for (unsigned i = 0; i != NumArgs; ++i) {
1665 MVT ArgVT = Ins[i].VT;
1666 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1667 
1668 Type *ArgTy = nullptr;
1669 if (IsRet)
1670 ArgTy = FType->getReturnType();
1671 else if (Ins[i].isOrigArg())
1672 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1673 
1674 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1675 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1676 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
1677 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1678 << EVT(ArgVT).getEVTString() << '\n');
1679 llvm_unreachable(nullptr);
1680 }
1681 }
1682 }
1683 
1684 void RISCVTargetLowering::analyzeOutputArgs(
1685 MachineFunction &MF, CCState &CCInfo,
1686 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1687 CallLoweringInfo *CLI) const {
1688 unsigned NumArgs = Outs.size();
1689 
1690 for (unsigned i = 0; i != NumArgs; i++) {
1691 MVT ArgVT = Outs[i].VT;
1692 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1693 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1694 
1695 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1696 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1697 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1698 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1699 << EVT(ArgVT).getEVTString() << "\n");
1700 llvm_unreachable(nullptr);
1701 }
1702 }
1703 }
1704 
1705 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1706 // values.
1707 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1708 const CCValAssign &VA, const SDLoc &DL) {
1709 switch (VA.getLocInfo()) {
1710 default:
1711 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1712 case CCValAssign::Full:
1713 break;
1714 case CCValAssign::BCvt:
1715 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1716 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1717 break;
1718 }
1719 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1720 break;
1721 }
1722 return Val;
1723 }
1724 
1725 // The caller is responsible for loading the full value if the argument is
1726 // passed with CCValAssign::Indirect.
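// (Illustrative: for an i128 on RV32 assigned CCValAssign::Indirect, the
// register carries only a pointer; LowerFormalArguments below issues the
// loads of the individual parts from that address.)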
1727 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 1728 const CCValAssign &VA, const SDLoc &DL) { 1729 MachineFunction &MF = DAG.getMachineFunction(); 1730 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1731 EVT LocVT = VA.getLocVT(); 1732 SDValue Val; 1733 const TargetRegisterClass *RC; 1734 1735 switch (LocVT.getSimpleVT().SimpleTy) { 1736 default: 1737 llvm_unreachable("Unexpected register type"); 1738 case MVT::i32: 1739 case MVT::i64: 1740 RC = &RISCV::GPRRegClass; 1741 break; 1742 case MVT::f32: 1743 RC = &RISCV::FPR32RegClass; 1744 break; 1745 case MVT::f64: 1746 RC = &RISCV::FPR64RegClass; 1747 break; 1748 } 1749 1750 Register VReg = RegInfo.createVirtualRegister(RC); 1751 RegInfo.addLiveIn(VA.getLocReg(), VReg); 1752 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 1753 1754 if (VA.getLocInfo() == CCValAssign::Indirect) 1755 return Val; 1756 1757 return convertLocVTToValVT(DAG, Val, VA, DL); 1758 } 1759 1760 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 1761 const CCValAssign &VA, const SDLoc &DL) { 1762 EVT LocVT = VA.getLocVT(); 1763 1764 switch (VA.getLocInfo()) { 1765 default: 1766 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1767 case CCValAssign::Full: 1768 break; 1769 case CCValAssign::BCvt: 1770 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) { 1771 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 1772 break; 1773 } 1774 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 1775 break; 1776 } 1777 return Val; 1778 } 1779 1780 // The caller is responsible for loading the full value if the argument is 1781 // passed with CCValAssign::Indirect. 1782 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 1783 const CCValAssign &VA, const SDLoc &DL) { 1784 MachineFunction &MF = DAG.getMachineFunction(); 1785 MachineFrameInfo &MFI = MF.getFrameInfo(); 1786 EVT LocVT = VA.getLocVT(); 1787 EVT ValVT = VA.getValVT(); 1788 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 1789 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, 1790 VA.getLocMemOffset(), /*Immutable=*/true); 1791 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 1792 SDValue Val; 1793 1794 ISD::LoadExtType ExtType; 1795 switch (VA.getLocInfo()) { 1796 default: 1797 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 1798 case CCValAssign::Full: 1799 case CCValAssign::Indirect: 1800 case CCValAssign::BCvt: 1801 ExtType = ISD::NON_EXTLOAD; 1802 break; 1803 } 1804 Val = DAG.getExtLoad( 1805 ExtType, DL, LocVT, Chain, FIN, 1806 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 1807 return Val; 1808 } 1809 1810 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 1811 const CCValAssign &VA, const SDLoc &DL) { 1812 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 1813 "Unexpected VA"); 1814 MachineFunction &MF = DAG.getMachineFunction(); 1815 MachineFrameInfo &MFI = MF.getFrameInfo(); 1816 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1817 1818 if (VA.isMemLoc()) { 1819 // f64 is passed on the stack. 
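// (Illustrative: CC_RISCV allocated no GPR for this f64 -- for example,
// because all eight argument GPRs were already taken -- so the whole value
// is reloaded from its 8-byte stack slot.)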
1820 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1821 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1822 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1823 MachinePointerInfo::getFixedStack(MF, FI));
1824 }
1825 
1826 assert(VA.isRegLoc() && "Expected register VA assignment");
1827 
1828 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1829 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1830 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1831 SDValue Hi;
1832 if (VA.getLocReg() == RISCV::X17) {
1833 // Second half of f64 is passed on the stack.
1834 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1835 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1836 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1837 MachinePointerInfo::getFixedStack(MF, FI));
1838 } else {
1839 // Second half of f64 is passed in another GPR.
1840 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1841 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1842 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1843 }
1844 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1845 }
1846 
1847 // FastCC showed less than a 1% performance improvement on some particular
1848 // benchmarks, but it may theoretically benefit other cases.
1849 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1850 CCValAssign::LocInfo LocInfo,
1851 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1852 
1853 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1854 // X5 and X6 might be used for save-restore libcalls.
1855 static const MCPhysReg GPRList[] = {
1856 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1857 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
1858 RISCV::X29, RISCV::X30, RISCV::X31};
1859 if (unsigned Reg = State.AllocateReg(GPRList)) {
1860 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1861 return false;
1862 }
1863 }
1864 
1865 if (LocVT == MVT::f32) {
1866 static const MCPhysReg FPR32List[] = {
1867 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1868 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
1869 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
1870 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1871 if (unsigned Reg = State.AllocateReg(FPR32List)) {
1872 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1873 return false;
1874 }
1875 }
1876 
1877 if (LocVT == MVT::f64) {
1878 static const MCPhysReg FPR64List[] = {
1879 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1880 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
1881 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
1882 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1883 if (unsigned Reg = State.AllocateReg(FPR64List)) {
1884 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1885 return false;
1886 }
1887 }
1888 
1889 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1890 unsigned Offset4 = State.AllocateStack(4, 4);
1891 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1892 return false;
1893 }
1894 
1895 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1896 unsigned Offset5 = State.AllocateStack(8, 8);
1897 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1898 return false;
1899 }
1900 
1901 return true; // CC didn't match.
1902 }
1903 
1904 // Transform physical registers into virtual registers.
1905 SDValue RISCVTargetLowering::LowerFormalArguments(
1906 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1907 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1908 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1909 
1910 switch (CallConv) {
1911 default:
1912 report_fatal_error("Unsupported calling convention");
1913 case CallingConv::C:
1914 case CallingConv::Fast:
1915 break;
1916 }
1917 
1918 MachineFunction &MF = DAG.getMachineFunction();
1919 
1920 const Function &Func = MF.getFunction();
1921 if (Func.hasFnAttribute("interrupt")) {
1922 if (!Func.arg_empty())
1923 report_fatal_error(
1924 "Functions with the interrupt attribute cannot have arguments!");
1925 
1926 StringRef Kind =
1927 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1928 
1929 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1930 report_fatal_error(
1931 "Function interrupt attribute argument not supported!");
1932 }
1933 
1934 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1935 MVT XLenVT = Subtarget.getXLenVT();
1936 unsigned XLenInBytes = Subtarget.getXLen() / 8;
1937 // Used with varargs to accumulate store chains.
1938 std::vector<SDValue> OutChains;
1939 
1940 // Assign locations to all of the incoming arguments.
1941 SmallVector<CCValAssign, 16> ArgLocs;
1942 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1943 
1944 if (CallConv == CallingConv::Fast)
1945 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
1946 else
1947 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
1948 
1949 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1950 CCValAssign &VA = ArgLocs[i];
1951 SDValue ArgValue;
1952 // Passing f64 on RV32D with a soft float ABI must be handled as a special
1953 // case.
1954 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
1955 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
1956 else if (VA.isRegLoc())
1957 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
1958 else
1959 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
1960 
1961 if (VA.getLocInfo() == CCValAssign::Indirect) {
1962 // If the original argument was split and passed by reference (e.g. i128
1963 // on RV32), we need to load all parts of it here (using the same
1964 // address).
1965 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1966 MachinePointerInfo()));
1967 unsigned ArgIndex = Ins[i].OrigArgIndex;
1968 assert(Ins[i].PartOffset == 0);
1969 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
1970 CCValAssign &PartVA = ArgLocs[i + 1];
1971 unsigned PartOffset = Ins[i + 1].PartOffset;
1972 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1973 DAG.getIntPtrConstant(PartOffset, DL));
1974 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1975 MachinePointerInfo()));
1976 ++i;
1977 }
1978 continue;
1979 }
1980 InVals.push_back(ArgValue);
1981 }
1982 
1983 if (IsVarArg) {
1984 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
1985 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
1986 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1987 MachineFrameInfo &MFI = MF.getFrameInfo();
1988 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1989 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1990 
1991 // Offset of the first variable argument from stack pointer, and size of
1992 // the vararg save area. For now, the varargs save area is either zero or
1993 // large enough to hold a0-a7.
1994 int VaArgOffset, VarArgsSaveSize;
1995 
1996 // If all registers are allocated, then all varargs must be passed on the
1997 // stack and we don't need to save any argregs.
1998 if (ArgRegs.size() == Idx) {
1999 VaArgOffset = CCInfo.getNextStackOffset();
2000 VarArgsSaveSize = 0;
2001 } else {
2002 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2003 VaArgOffset = -VarArgsSaveSize;
2004 }
2005 
2006 // Record the frame index of the first variable argument,
2007 // which is needed by VASTART.
2008 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2009 RVFI->setVarArgsFrameIndex(FI);
2010 
2011 // If saving an odd number of registers, then create an extra stack slot to
2012 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2013 // offsets to even-numbered registers remain 2*XLEN-aligned.
2014 if (Idx % 2) {
2015 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2016 VarArgsSaveSize += XLenInBytes;
2017 }
2018 
2019 // Copy the integer registers that may have been used for passing varargs
2020 // to the vararg save area.
2021 for (unsigned I = Idx; I < ArgRegs.size();
2022 ++I, VaArgOffset += XLenInBytes) {
2023 const Register Reg = RegInfo.createVirtualRegister(RC);
2024 RegInfo.addLiveIn(ArgRegs[I], Reg);
2025 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2026 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2027 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2028 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2029 MachinePointerInfo::getFixedStack(MF, FI));
2030 cast<StoreSDNode>(Store.getNode())
2031 ->getMemOperand()
2032 ->setValue((Value *)nullptr);
2033 OutChains.push_back(Store);
2034 }
2035 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2036 }
2037 
2038 // All stores are grouped in one node to allow the matching between
2039 // the size of Ins and InVals. This only happens for vararg functions.
2040 if (!OutChains.empty()) {
2041 OutChains.push_back(Chain);
2042 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2043 }
2044 
2045 return Chain;
2046 }
2047 
2048 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2049 /// for tail call optimization.
2050 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2051 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2052 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2053 const SmallVector<CCValAssign, 16> &ArgLocs) const {
2054 
2055 auto &Callee = CLI.Callee;
2056 auto CalleeCC = CLI.CallConv;
2057 auto &Outs = CLI.Outs;
2058 auto &Caller = MF.getFunction();
2059 auto CallerCC = Caller.getCallingConv();
2060 
2061 // Exception-handling functions need a special set of instructions to
2062 // indicate a return to the hardware. Tail-calling another function would
2063 // probably break this.
2064 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2065 // should be expanded as new function attributes are introduced.
2066 if (Caller.hasFnAttribute("interrupt"))
2067 return false;
2068 
2069 // Do not perform tail-call optimization if the stack is used to pass
2070 // parameters.
2071 if (CCInfo.getNextStackOffset() != 0)
2072 return false;
2073 
2074 // Do not perform tail-call optimization if any parameters need to be
2075 // passed indirectly. Since long doubles (fp128) and i128 are larger than
2076 // 2*XLEN, they are passed indirectly. So the address of the value is
2077 // passed in a register, or put on the stack if no register is available.
2078 // Passing a value indirectly usually also requires allocating stack space
2079 // for it, so the CCInfo.getNextStackOffset() != 0 check above is not
2080 // sufficient on its own; we must also check whether any CCValAssign in
// ArgLocs is marked CCValAssign::Indirect.
2081 for (auto &VA : ArgLocs)
2082 if (VA.getLocInfo() == CCValAssign::Indirect)
2083 return false;
2084 
2085 // Do not perform tail-call optimization if either the caller or callee uses
2086 // struct return semantics.
2087 auto IsCallerStructRet = Caller.hasStructRetAttr();
2088 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2089 if (IsCallerStructRet || IsCalleeStructRet)
2090 return false;
2091 
2092 // Externally-defined functions with weak linkage should not be
2093 // tail-called. The behaviour of branch instructions in this situation (as
2094 // used for tail calls) is implementation-defined, so we cannot rely on the
2095 // linker replacing the tail call with a return.
2096 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2097 const GlobalValue *GV = G->getGlobal();
2098 if (GV->hasExternalWeakLinkage())
2099 return false;
2100 }
2101 
2102 // The callee has to preserve all registers the caller needs to preserve.
2103 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2104 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2105 if (CalleeCC != CallerCC) {
2106 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2107 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2108 return false;
2109 }
2110 
2111 // Byval parameters hand the function a pointer directly into the stack area
2112 // we want to reuse during a tail call. Working around this *is* possible
2113 // but less efficient and uglier in LowerCall.
2114 for (auto &Arg : Outs)
2115 if (Arg.Flags.isByVal())
2116 return false;
2117 
2118 return true;
2119 }
2120 
2121 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2122 // and output parameter nodes.
2123 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2124 SmallVectorImpl<SDValue> &InVals) const {
2125 SelectionDAG &DAG = CLI.DAG;
2126 SDLoc &DL = CLI.DL;
2127 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2128 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2129 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2130 SDValue Chain = CLI.Chain;
2131 SDValue Callee = CLI.Callee;
2132 bool &IsTailCall = CLI.IsTailCall;
2133 CallingConv::ID CallConv = CLI.CallConv;
2134 bool IsVarArg = CLI.IsVarArg;
2135 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2136 MVT XLenVT = Subtarget.getXLenVT();
2137 
2138 MachineFunction &MF = DAG.getMachineFunction();
2139 
2140 // Analyze the operands of the call, assigning locations to each operand.
2141 SmallVector<CCValAssign, 16> ArgLocs;
2142 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2143 
2144 if (CallConv == CallingConv::Fast)
2145 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2146 else
2147 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2148 
2149 // Check if it's really possible to do a tail call.
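// (Illustrative note: when the eligibility check fails, IsTailCall is simply
// cleared and the call is lowered as an ordinary call; only a call marked
// musttail that cannot be lowered as a tail call is a fatal error, as below.)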
2150 if (IsTailCall) 2151 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 2152 2153 if (IsTailCall) 2154 ++NumTailCalls; 2155 else if (CLI.CB && CLI.CB->isMustTailCall()) 2156 report_fatal_error("failed to perform tail call elimination on a call " 2157 "site marked musttail"); 2158 2159 // Get a count of how many bytes are to be pushed on the stack. 2160 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 2161 2162 // Create local copies for byval args 2163 SmallVector<SDValue, 8> ByValArgs; 2164 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2165 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2166 if (!Flags.isByVal()) 2167 continue; 2168 2169 SDValue Arg = OutVals[i]; 2170 unsigned Size = Flags.getByValSize(); 2171 Align Alignment = Flags.getNonZeroByValAlign(); 2172 2173 int FI = 2174 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 2175 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 2176 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 2177 2178 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 2179 /*IsVolatile=*/false, 2180 /*AlwaysInline=*/false, IsTailCall, 2181 MachinePointerInfo(), MachinePointerInfo()); 2182 ByValArgs.push_back(FIPtr); 2183 } 2184 2185 if (!IsTailCall) 2186 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 2187 2188 // Copy argument values to their designated locations. 2189 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 2190 SmallVector<SDValue, 8> MemOpChains; 2191 SDValue StackPtr; 2192 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 2193 CCValAssign &VA = ArgLocs[i]; 2194 SDValue ArgValue = OutVals[i]; 2195 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2196 2197 // Handle passing f64 on RV32D with a soft float ABI as a special case. 2198 bool IsF64OnRV32DSoftABI = 2199 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 2200 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 2201 SDValue SplitF64 = DAG.getNode( 2202 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 2203 SDValue Lo = SplitF64.getValue(0); 2204 SDValue Hi = SplitF64.getValue(1); 2205 2206 Register RegLo = VA.getLocReg(); 2207 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 2208 2209 if (RegLo == RISCV::X17) { 2210 // Second half of f64 is passed on the stack. 2211 // Work out the address of the stack slot. 2212 if (!StackPtr.getNode()) 2213 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 2214 // Emit the store. 2215 MemOpChains.push_back( 2216 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 2217 } else { 2218 // Second half of f64 is passed in another GPR. 2219 assert(RegLo < RISCV::X31 && "Invalid register pair"); 2220 Register RegHigh = RegLo + 1; 2221 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 2222 } 2223 continue; 2224 } 2225 2226 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 2227 // as any other MemLoc. 2228 2229 // Promote the value if needed. 2230 // For now, only handle fully promoted and indirect arguments. 2231 if (VA.getLocInfo() == CCValAssign::Indirect) { 2232 // Store the argument in a stack slot and pass its address. 2233 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 2234 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2235 MemOpChains.push_back( 2236 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 2237 MachinePointerInfo::getFixedStack(MF, FI))); 2238 // If the original argument was split (e.g. 
i128), we need
2239 // to store all parts of it here (and pass just one address).
2240 unsigned ArgIndex = Outs[i].OrigArgIndex;
2241 assert(Outs[i].PartOffset == 0);
2242 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2243 SDValue PartValue = OutVals[i + 1];
2244 unsigned PartOffset = Outs[i + 1].PartOffset;
2245 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2246 DAG.getIntPtrConstant(PartOffset, DL));
2247 MemOpChains.push_back(
2248 DAG.getStore(Chain, DL, PartValue, Address,
2249 MachinePointerInfo::getFixedStack(MF, FI)));
2250 ++i;
2251 }
2252 ArgValue = SpillSlot;
2253 } else {
2254 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2255 }
2256 
2257 // Use local copy if it is a byval arg.
2258 if (Flags.isByVal())
2259 ArgValue = ByValArgs[j++];
2260 
2261 if (VA.isRegLoc()) {
2262 // Queue up the argument copies and emit them at the end.
2263 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2264 } else {
2265 assert(VA.isMemLoc() && "Argument not register or memory");
2266 assert(!IsTailCall && "Tail call not allowed if stack is used "
2267 "for passing parameters");
2268 
2269 // Work out the address of the stack slot.
2270 if (!StackPtr.getNode())
2271 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2272 SDValue Address =
2273 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2274 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2275 
2276 // Emit the store.
2277 MemOpChains.push_back(
2278 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2279 }
2280 }
2281 
2282 // Join the stores, which are independent of one another.
2283 if (!MemOpChains.empty())
2284 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2285 
2286 SDValue Glue;
2287 
2288 // Build a sequence of copy-to-reg nodes, chained and glued together.
2289 for (auto &Reg : RegsToPass) {
2290 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2291 Glue = Chain.getValue(1);
2292 }
2293 
2294 // Validate that none of the argument registers have been marked as
2295 // reserved; if so, report an error. Do the same for the return address if
2296 // this is not a tail call.
2297 validateCCReservedRegs(RegsToPass, MF);
2298 if (!IsTailCall &&
2299 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2300 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2301 MF.getFunction(),
2302 "Return address register required, but has been reserved."});
2303 
2304 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2305 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2306 // split it, and so that the direct call can be matched by PseudoCALL.
2307 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2308 const GlobalValue *GV = S->getGlobal();
2309 
2310 unsigned OpFlags = RISCVII::MO_CALL;
2311 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2312 OpFlags = RISCVII::MO_PLT;
2313 
2314 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2315 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2316 unsigned OpFlags = RISCVII::MO_CALL;
2317 
2318 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2319 nullptr))
2320 OpFlags = RISCVII::MO_PLT;
2321 
2322 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2323 }
2324 
2325 // The first call operand is the chain and the second is the target address.
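// (Illustrative summary: the completed operand list has the shape
// (Chain, Callee, ArgReg0, ..., ArgRegN[, RegMask][, Glue]), where the
// register mask is only added for non-tail calls and the glue only when
// argument copies were emitted.)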
2326 SmallVector<SDValue, 8> Ops; 2327 Ops.push_back(Chain); 2328 Ops.push_back(Callee); 2329 2330 // Add argument registers to the end of the list so that they are 2331 // known live into the call. 2332 for (auto &Reg : RegsToPass) 2333 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 2334 2335 if (!IsTailCall) { 2336 // Add a register mask operand representing the call-preserved registers. 2337 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2338 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 2339 assert(Mask && "Missing call preserved mask for calling convention"); 2340 Ops.push_back(DAG.getRegisterMask(Mask)); 2341 } 2342 2343 // Glue the call to the argument copies, if any. 2344 if (Glue.getNode()) 2345 Ops.push_back(Glue); 2346 2347 // Emit the call. 2348 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 2349 2350 if (IsTailCall) { 2351 MF.getFrameInfo().setHasTailCall(); 2352 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 2353 } 2354 2355 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 2356 Glue = Chain.getValue(1); 2357 2358 // Mark the end of the call, which is glued to the call itself. 2359 Chain = DAG.getCALLSEQ_END(Chain, 2360 DAG.getConstant(NumBytes, DL, PtrVT, true), 2361 DAG.getConstant(0, DL, PtrVT, true), 2362 Glue, DL); 2363 Glue = Chain.getValue(1); 2364 2365 // Assign locations to each value returned by this call. 2366 SmallVector<CCValAssign, 16> RVLocs; 2367 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 2368 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 2369 2370 // Copy all of the result registers out of their specified physreg. 2371 for (auto &VA : RVLocs) { 2372 // Copy the value out 2373 SDValue RetValue = 2374 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 2375 // Glue the RetValue to the end of the call sequence 2376 Chain = RetValue.getValue(1); 2377 Glue = RetValue.getValue(2); 2378 2379 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 2380 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 2381 SDValue RetValue2 = 2382 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 2383 Chain = RetValue2.getValue(1); 2384 Glue = RetValue2.getValue(2); 2385 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 2386 RetValue2); 2387 } 2388 2389 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); 2390 2391 InVals.push_back(RetValue); 2392 } 2393 2394 return Chain; 2395 } 2396 2397 bool RISCVTargetLowering::CanLowerReturn( 2398 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 2399 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 2400 SmallVector<CCValAssign, 16> RVLocs; 2401 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 2402 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2403 MVT VT = Outs[i].VT; 2404 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2405 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2406 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 2407 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr)) 2408 return false; 2409 } 2410 return true; 2411 } 2412 2413 SDValue 2414 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 2415 bool IsVarArg, 2416 const SmallVectorImpl<ISD::OutputArg> &Outs, 2417 const SmallVectorImpl<SDValue> &OutVals, 2418 const SDLoc &DL, SelectionDAG &DAG) const { 2419 const MachineFunction &MF = DAG.getMachineFunction(); 2420 const 
RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 2421 2422 // Stores the assignment of the return value to a location. 2423 SmallVector<CCValAssign, 16> RVLocs; 2424 2425 // Info about the registers and stack slot. 2426 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 2427 *DAG.getContext()); 2428 2429 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 2430 nullptr); 2431 2432 SDValue Glue; 2433 SmallVector<SDValue, 4> RetOps(1, Chain); 2434 2435 // Copy the result values into the output registers. 2436 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 2437 SDValue Val = OutVals[i]; 2438 CCValAssign &VA = RVLocs[i]; 2439 assert(VA.isRegLoc() && "Can only return in registers!"); 2440 2441 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 2442 // Handle returning f64 on RV32D with a soft float ABI. 2443 assert(VA.isRegLoc() && "Expected return via registers"); 2444 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 2445 DAG.getVTList(MVT::i32, MVT::i32), Val); 2446 SDValue Lo = SplitF64.getValue(0); 2447 SDValue Hi = SplitF64.getValue(1); 2448 Register RegLo = VA.getLocReg(); 2449 assert(RegLo < RISCV::X31 && "Invalid register pair"); 2450 Register RegHi = RegLo + 1; 2451 2452 if (STI.isRegisterReservedByUser(RegLo) || 2453 STI.isRegisterReservedByUser(RegHi)) 2454 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 2455 MF.getFunction(), 2456 "Return value register required, but has been reserved."}); 2457 2458 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 2459 Glue = Chain.getValue(1); 2460 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 2461 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 2462 Glue = Chain.getValue(1); 2463 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 2464 } else { 2465 // Handle a 'normal' return. 2466 Val = convertValVTToLocVT(DAG, Val, VA, DL); 2467 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 2468 2469 if (STI.isRegisterReservedByUser(VA.getLocReg())) 2470 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 2471 MF.getFunction(), 2472 "Return value register required, but has been reserved."}); 2473 2474 // Guarantee that all emitted copies are stuck together. 2475 Glue = Chain.getValue(1); 2476 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2477 } 2478 } 2479 2480 RetOps[0] = Chain; // Update chain. 2481 2482 // Add the glue node if we have it. 2483 if (Glue.getNode()) { 2484 RetOps.push_back(Glue); 2485 } 2486 2487 // Interrupt service routines use different return instructions. 
2488 const Function &Func = DAG.getMachineFunction().getFunction(); 2489 if (Func.hasFnAttribute("interrupt")) { 2490 if (!Func.getReturnType()->isVoidTy()) 2491 report_fatal_error( 2492 "Functions with the interrupt attribute must have void return type!"); 2493 2494 MachineFunction &MF = DAG.getMachineFunction(); 2495 StringRef Kind = 2496 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 2497 2498 unsigned RetOpc; 2499 if (Kind == "user") 2500 RetOpc = RISCVISD::URET_FLAG; 2501 else if (Kind == "supervisor") 2502 RetOpc = RISCVISD::SRET_FLAG; 2503 else 2504 RetOpc = RISCVISD::MRET_FLAG; 2505 2506 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 2507 } 2508 2509 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 2510 } 2511 2512 void RISCVTargetLowering::validateCCReservedRegs( 2513 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 2514 MachineFunction &MF) const { 2515 const Function &F = MF.getFunction(); 2516 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 2517 2518 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) { 2519 return STI.isRegisterReservedByUser(Reg.first); 2520 })) 2521 F.getContext().diagnose(DiagnosticInfoUnsupported{ 2522 F, "Argument register required, but has been reserved."}); 2523 } 2524 2525 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 2526 return CI->isTailCall(); 2527 } 2528 2529 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 2530 switch ((RISCVISD::NodeType)Opcode) { 2531 case RISCVISD::FIRST_NUMBER: 2532 break; 2533 case RISCVISD::RET_FLAG: 2534 return "RISCVISD::RET_FLAG"; 2535 case RISCVISD::URET_FLAG: 2536 return "RISCVISD::URET_FLAG"; 2537 case RISCVISD::SRET_FLAG: 2538 return "RISCVISD::SRET_FLAG"; 2539 case RISCVISD::MRET_FLAG: 2540 return "RISCVISD::MRET_FLAG"; 2541 case RISCVISD::CALL: 2542 return "RISCVISD::CALL"; 2543 case RISCVISD::SELECT_CC: 2544 return "RISCVISD::SELECT_CC"; 2545 case RISCVISD::BuildPairF64: 2546 return "RISCVISD::BuildPairF64"; 2547 case RISCVISD::SplitF64: 2548 return "RISCVISD::SplitF64"; 2549 case RISCVISD::TAIL: 2550 return "RISCVISD::TAIL"; 2551 case RISCVISD::SLLW: 2552 return "RISCVISD::SLLW"; 2553 case RISCVISD::SRAW: 2554 return "RISCVISD::SRAW"; 2555 case RISCVISD::SRLW: 2556 return "RISCVISD::SRLW"; 2557 case RISCVISD::DIVW: 2558 return "RISCVISD::DIVW"; 2559 case RISCVISD::DIVUW: 2560 return "RISCVISD::DIVUW"; 2561 case RISCVISD::REMUW: 2562 return "RISCVISD::REMUW"; 2563 case RISCVISD::FMV_W_X_RV64: 2564 return "RISCVISD::FMV_W_X_RV64"; 2565 case RISCVISD::FMV_X_ANYEXTW_RV64: 2566 return "RISCVISD::FMV_X_ANYEXTW_RV64"; 2567 case RISCVISD::READ_CYCLE_WIDE: 2568 return "RISCVISD::READ_CYCLE_WIDE"; 2569 } 2570 return nullptr; 2571 } 2572 2573 /// getConstraintType - Given a constraint letter, return the type of 2574 /// constraint it is for this target. 
2575 RISCVTargetLowering::ConstraintType
2576 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
2577 if (Constraint.size() == 1) {
2578 switch (Constraint[0]) {
2579 default:
2580 break;
2581 case 'f':
2582 return C_RegisterClass;
2583 case 'I':
2584 case 'J':
2585 case 'K':
2586 return C_Immediate;
2587 case 'A':
2588 return C_Memory;
2589 }
2590 }
2591 return TargetLowering::getConstraintType(Constraint);
2592 }
2593 
2594 std::pair<unsigned, const TargetRegisterClass *>
2595 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2596 StringRef Constraint,
2597 MVT VT) const {
2598 // First, see if this is a constraint that directly corresponds to a
2599 // RISCV register class.
2600 if (Constraint.size() == 1) {
2601 switch (Constraint[0]) {
2602 case 'r':
2603 return std::make_pair(0U, &RISCV::GPRRegClass);
2604 case 'f':
2605 if (Subtarget.hasStdExtF() && VT == MVT::f32)
2606 return std::make_pair(0U, &RISCV::FPR32RegClass);
2607 if (Subtarget.hasStdExtD() && VT == MVT::f64)
2608 return std::make_pair(0U, &RISCV::FPR64RegClass);
2609 break;
2610 default:
2611 break;
2612 }
2613 }
2614 
2615 // Clang will correctly decode the usage of register name aliases into their
2616 // official names. However, other frontends like `rustc` do not. This allows
2617 // users of these frontends to use the ABI names for registers in LLVM-style
2618 // register constraints.
2619 Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
2620 .Case("{zero}", RISCV::X0)
2621 .Case("{ra}", RISCV::X1)
2622 .Case("{sp}", RISCV::X2)
2623 .Case("{gp}", RISCV::X3)
2624 .Case("{tp}", RISCV::X4)
2625 .Case("{t0}", RISCV::X5)
2626 .Case("{t1}", RISCV::X6)
2627 .Case("{t2}", RISCV::X7)
2628 .Cases("{s0}", "{fp}", RISCV::X8)
2629 .Case("{s1}", RISCV::X9)
2630 .Case("{a0}", RISCV::X10)
2631 .Case("{a1}", RISCV::X11)
2632 .Case("{a2}", RISCV::X12)
2633 .Case("{a3}", RISCV::X13)
2634 .Case("{a4}", RISCV::X14)
2635 .Case("{a5}", RISCV::X15)
2636 .Case("{a6}", RISCV::X16)
2637 .Case("{a7}", RISCV::X17)
2638 .Case("{s2}", RISCV::X18)
2639 .Case("{s3}", RISCV::X19)
2640 .Case("{s4}", RISCV::X20)
2641 .Case("{s5}", RISCV::X21)
2642 .Case("{s6}", RISCV::X22)
2643 .Case("{s7}", RISCV::X23)
2644 .Case("{s8}", RISCV::X24)
2645 .Case("{s9}", RISCV::X25)
2646 .Case("{s10}", RISCV::X26)
2647 .Case("{s11}", RISCV::X27)
2648 .Case("{t3}", RISCV::X28)
2649 .Case("{t4}", RISCV::X29)
2650 .Case("{t5}", RISCV::X30)
2651 .Case("{t6}", RISCV::X31)
2652 .Default(RISCV::NoRegister);
2653 if (XRegFromAlias != RISCV::NoRegister)
2654 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
2655 
2656 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
2657 // TableGen record rather than the AsmName to choose registers for InlineAsm
2658 // constraints, and because we want to match those names to the widest
2659 // floating point register type available, manually select FP registers here.
2660 //
2661 // The second case is the ABI name of the register, so that frontends can also
2662 // use the ABI names in register constraint lists.
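// (Illustrative: with the table below, an inline-asm constraint written as
// either "{f10}" or "{fa0}" resolves to F10_F, or to F10_D when the D
// extension is available.)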
2663 if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) { 2664 std::pair<Register, Register> FReg = 2665 StringSwitch<std::pair<Register, Register>>(Constraint.lower()) 2666 .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D}) 2667 .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D}) 2668 .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D}) 2669 .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D}) 2670 .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D}) 2671 .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D}) 2672 .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D}) 2673 .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D}) 2674 .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D}) 2675 .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D}) 2676 .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D}) 2677 .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D}) 2678 .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D}) 2679 .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D}) 2680 .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D}) 2681 .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D}) 2682 .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D}) 2683 .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D}) 2684 .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D}) 2685 .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D}) 2686 .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D}) 2687 .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D}) 2688 .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D}) 2689 .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D}) 2690 .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D}) 2691 .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D}) 2692 .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D}) 2693 .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D}) 2694 .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D}) 2695 .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D}) 2696 .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D}) 2697 .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D}) 2698 .Default({RISCV::NoRegister, RISCV::NoRegister}); 2699 if (FReg.first != RISCV::NoRegister) 2700 return Subtarget.hasStdExtD() 2701 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass) 2702 : std::make_pair(FReg.first, &RISCV::FPR32RegClass); 2703 } 2704 2705 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 2706 } 2707 2708 unsigned 2709 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 2710 // Currently only support length 1 constraints. 2711 if (ConstraintCode.size() == 1) { 2712 switch (ConstraintCode[0]) { 2713 case 'A': 2714 return InlineAsm::Constraint_A; 2715 default: 2716 break; 2717 } 2718 } 2719 2720 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 2721 } 2722 2723 void RISCVTargetLowering::LowerAsmOperandForConstraint( 2724 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 2725 SelectionDAG &DAG) const { 2726 // Currently only support length 1 constraints. 2727 if (Constraint.length() == 1) { 2728 switch (Constraint[0]) { 2729 case 'I': 2730 // Validate & create a 12-bit signed immediate operand. 2731 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2732 uint64_t CVal = C->getSExtValue(); 2733 if (isInt<12>(CVal)) 2734 Ops.push_back( 2735 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 2736 } 2737 return; 2738 case 'J': 2739 // Validate & create an integer zero operand. 
2740 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 2741 if (C->getZExtValue() == 0) 2742 Ops.push_back( 2743 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 2744 return; 2745 case 'K': 2746 // Validate & create a 5-bit unsigned immediate operand. 2747 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 2748 uint64_t CVal = C->getZExtValue(); 2749 if (isUInt<5>(CVal)) 2750 Ops.push_back( 2751 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 2752 } 2753 return; 2754 default: 2755 break; 2756 } 2757 } 2758 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 2759 } 2760 2761 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 2762 Instruction *Inst, 2763 AtomicOrdering Ord) const { 2764 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 2765 return Builder.CreateFence(Ord); 2766 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 2767 return Builder.CreateFence(AtomicOrdering::Release); 2768 return nullptr; 2769 } 2770 2771 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 2772 Instruction *Inst, 2773 AtomicOrdering Ord) const { 2774 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 2775 return Builder.CreateFence(AtomicOrdering::Acquire); 2776 return nullptr; 2777 } 2778 2779 TargetLowering::AtomicExpansionKind 2780 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 2781 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 2782 // point operations can't be used in an lr/sc sequence without breaking the 2783 // forward-progress guarantee. 2784 if (AI->isFloatingPointOperation()) 2785 return AtomicExpansionKind::CmpXChg; 2786 2787 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 2788 if (Size == 8 || Size == 16) 2789 return AtomicExpansionKind::MaskedIntrinsic; 2790 return AtomicExpansionKind::None; 2791 } 2792 2793 static Intrinsic::ID 2794 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 2795 if (XLen == 32) { 2796 switch (BinOp) { 2797 default: 2798 llvm_unreachable("Unexpected AtomicRMW BinOp"); 2799 case AtomicRMWInst::Xchg: 2800 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 2801 case AtomicRMWInst::Add: 2802 return Intrinsic::riscv_masked_atomicrmw_add_i32; 2803 case AtomicRMWInst::Sub: 2804 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 2805 case AtomicRMWInst::Nand: 2806 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 2807 case AtomicRMWInst::Max: 2808 return Intrinsic::riscv_masked_atomicrmw_max_i32; 2809 case AtomicRMWInst::Min: 2810 return Intrinsic::riscv_masked_atomicrmw_min_i32; 2811 case AtomicRMWInst::UMax: 2812 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 2813 case AtomicRMWInst::UMin: 2814 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 2815 } 2816 } 2817 2818 if (XLen == 64) { 2819 switch (BinOp) { 2820 default: 2821 llvm_unreachable("Unexpected AtomicRMW BinOp"); 2822 case AtomicRMWInst::Xchg: 2823 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 2824 case AtomicRMWInst::Add: 2825 return Intrinsic::riscv_masked_atomicrmw_add_i64; 2826 case AtomicRMWInst::Sub: 2827 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 2828 case AtomicRMWInst::Nand: 2829 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 2830 case AtomicRMWInst::Max: 2831 return Intrinsic::riscv_masked_atomicrmw_max_i64; 2832 case AtomicRMWInst::Min: 2833 return Intrinsic::riscv_masked_atomicrmw_min_i64; 2834 case AtomicRMWInst::UMax: 2835 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 2836 
case AtomicRMWInst::UMin: 2837 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 2838 } 2839 } 2840 2841 llvm_unreachable("Unexpected XLen\n"); 2842 } 2843 2844 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 2845 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 2846 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 2847 unsigned XLen = Subtarget.getXLen(); 2848 Value *Ordering = 2849 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 2850 Type *Tys[] = {AlignedAddr->getType()}; 2851 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 2852 AI->getModule(), 2853 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 2854 2855 if (XLen == 64) { 2856 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 2857 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 2858 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 2859 } 2860 2861 Value *Result; 2862 2863 // Must pass the shift amount needed to sign extend the loaded value prior 2864 // to performing a signed comparison for min/max. ShiftAmt is the number of 2865 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 2866 // is the number of bits to left+right shift the value in order to 2867 // sign-extend. 2868 if (AI->getOperation() == AtomicRMWInst::Min || 2869 AI->getOperation() == AtomicRMWInst::Max) { 2870 const DataLayout &DL = AI->getModule()->getDataLayout(); 2871 unsigned ValWidth = 2872 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 2873 Value *SextShamt = 2874 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 2875 Result = Builder.CreateCall(LrwOpScwLoop, 2876 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 2877 } else { 2878 Result = 2879 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 2880 } 2881 2882 if (XLen == 64) 2883 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 2884 return Result; 2885 } 2886 2887 TargetLowering::AtomicExpansionKind 2888 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 2889 AtomicCmpXchgInst *CI) const { 2890 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 2891 if (Size == 8 || Size == 16) 2892 return AtomicExpansionKind::MaskedIntrinsic; 2893 return AtomicExpansionKind::None; 2894 } 2895 2896 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 2897 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 2898 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 2899 unsigned XLen = Subtarget.getXLen(); 2900 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 2901 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 2902 if (XLen == 64) { 2903 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 2904 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 2905 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 2906 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 2907 } 2908 Type *Tys[] = {AlignedAddr->getType()}; 2909 Function *MaskedCmpXchg = 2910 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 2911 Value *Result = Builder.CreateCall( 2912 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 2913 if (XLen == 64) 2914 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 2915 return Result; 2916 } 2917 2918 Register RISCVTargetLowering::getExceptionPointerRegister( 2919 const Constant *PersonalityFn) const { 2920 return RISCV::X10; 2921 } 2922 2923 Register 
RISCVTargetLowering::getExceptionSelectorRegister(
2924 const Constant *PersonalityFn) const {
2925 return RISCV::X11;
2926 }
2927 
2928 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2929 // Return false to suppress unnecessary extensions when a libcall argument
2930 // or return value is of f32 type under the LP64 ABI.
2931 RISCVABI::ABI ABI = Subtarget.getTargetABI();
2932 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2933 return false;
2934 
2935 return true;
2936 }
2937 
2938 #define GET_REGISTER_MATCHER
2939 #include "RISCVGenAsmMatcher.inc"
2940 
2941 Register
2942 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2943 const MachineFunction &MF) const {
2944 Register Reg = MatchRegisterAltName(RegName);
2945 if (Reg == RISCV::NoRegister)
2946 Reg = MatchRegisterName(RegName);
2947 if (Reg == RISCV::NoRegister)
2948 report_fatal_error(
2949 Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2950 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
2951 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
2952 report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
2953 StringRef(RegName) + "\"."));
2954 return Reg;
2955 }
2956 
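// (Illustrative usage note, an editorial addition: getRegisterByName backs
// named-register intrinsics such as llvm.read_register, e.g.
//   %sp = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"sp"}
// Both ABI names ("sp") and architectural names ("x2") are accepted, and the
// register must be reserved -- either always, like sp, or reserved by the
// user, e.g. via a -ffixed-<reg> style option -- otherwise a fatal error is
// reported.)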