//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  if (Subtarget.hasStdExtV()) {
    addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);

    addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::BSWAP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Legal);
    setOperationAction(ISD::FSHR, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV())
    setBooleanVectorContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  // We can use any register for comparisons.
  setHasMultipleConditionRegisters();

  if (Subtarget.hasStdExtZbp()) {
    setTargetDAGCombine(ISD::OR);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
            Subtarget.hasStdExtZfh()) &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::i16)
        return SDValue();
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::i32)
        return SDValue();
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
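    // For example, on RV32 this yields an immediate of 31 for BITREVERSE and
    // 24 for BSWAP (63 and 56 respectively on RV64).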
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
    return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
  }
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2GiB range within
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT address.
  // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
  // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  }
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  case RISCVISD::GREVI:
    return RISCVISD::GREVIW;
  case RISCVISD::GORCI:
    return RISCVISD::GORCIW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on because the fact that the operation was originally of
// type i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics, reducing the number of sign extension instructions needed.
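// For example, an i32 (add x, y) becomes
//   (trunc (sext_inreg (add (any_extend x), (any_extend y)), i32))
// so the fact that the result is sign-extended remains visible to instruction
// selection.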
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization we'd end up with 'di'. If
    // the FP type doesn't need to be softened just let generic type
    // legalization promote the result type.
    if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
        TargetLowering::TypeSoftenFloat)
      return;
    RTLIB::Libcall LC;
    if (N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             Subtarget.hasStdExtF()) ||
            (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
           "Unexpected custom legalisation");
    SDValue Op0 = N->getOperand(0);
    if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::f16)
        return;
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
    } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::f32)
        return;
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    }
    break;
  }
  case RISCVISD::GREVI:
  case RISCVISD::GORCI: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    // This is similar to customLegalizeToWOp, except that we pass the second
    // operand (a TargetConstant) straight through: it is already of type
    // XLenVT.
    SDLoc DL(N);
    RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewRes =
        DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    break;
  }
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
                                 N->getOperand(0));
    unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
    SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0,
                                 DAG.getTargetConstant(Imm, DL,
                                                       Subtarget.getXLenVT()));
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue NewOp2 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
    // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
    // Mask the shift amount to 5 bits.
    NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                         DAG.getConstant(0x1f, DL, MVT::i64));
    unsigned Opc =
        N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
    SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
    break;
  }
  }
}

// A structure to hold one of the bit-manipulation patterns below. Together, a
// SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
//   (or (and (shl x, 1), 0xAAAAAAAA),
//       (and (srl x, 1), 0x55555555))
struct RISCVBitmanipPat {
  SDValue Op;
  unsigned ShAmt;
  bool IsSHL;

  bool formsPairWith(const RISCVBitmanipPat &Other) const {
    return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
  }
};

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x55555555 << 1))
//   (and (srl x, 1), 0x55555555)
//   (shl (and x, 0x55555555), 1)
//   (srl (and x, (0x55555555 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x55555555 / 0xAAAAAAAA
//   [2]  = 0x33333333 / 0xCCCCCCCC
//   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
//   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
//   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
static Optional<RISCVBitmanipPat> matchRISCVBitmanipPat(SDValue Op) {
  Optional<uint64_t> Mask;
  // Optionally consume a mask around the shift operation.
  if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
    Mask = Op.getConstantOperandVal(1);
    Op = Op.getOperand(0);
  }
  if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
    return None;
  bool IsSHL = Op.getOpcode() == ISD::SHL;

  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return None;
  auto ShAmt = Op.getConstantOperandVal(1);

  if (!isPowerOf2_64(ShAmt))
    return None;

  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
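  // For example, a shift amount of 4 selects the 0x0F0F... mask; when the AND
  // surrounds the SHL form of the pattern, the expected mask is shifted left
  // by the shift amount (giving 0xF0F0...).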
  static const uint64_t BitmanipMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
  };

  unsigned MaskIdx = Log2_64(ShAmt);
  if (MaskIdx >= array_lengthof(BitmanipMasks))
    return None;

  auto Src = Op.getOperand(0);

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  auto ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  // The expected mask is shifted left when the AND is found around SHL
  // patterns.
  //   ((x >> 1) & 0x55555555)
  //   ((x << 1) & 0xAAAAAAAA)
  bool SHLExpMask = IsSHL;

  if (!Mask) {
    // Sometimes LLVM keeps the mask as an operand of the shift, typically when
    // the mask is all ones: consume that now.
    if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
      Mask = Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
      // The expected mask is now in fact shifted left for SRL, so reverse the
      // decision.
      //   ((x & 0xAAAAAAAA) >> 1)
      //   ((x & 0x55555555) << 1)
      SHLExpMask = !SHLExpMask;
    } else {
      // Use a default shifted mask of all-ones if there's no AND, truncated
      // down to the expected width. This simplifies the logic later on.
      Mask = maskTrailingOnes<uint64_t>(Width);
      *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
    }
  }

  if (SHLExpMask)
    ExpMask <<= ShAmt;

  if (Mask != ExpMask)
    return None;

  return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
}

// Match the following pattern as a GREVI(W) operation
//   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    auto LHS = matchRISCVBitmanipPat(Op.getOperand(0));
    auto RHS = matchRISCVBitmanipPat(Op.getOperand(1));
    if (LHS && RHS && LHS->formsPairWith(*RHS)) {
      SDLoc DL(Op);
      return DAG.getNode(
          RISCVISD::GREVI, DL, VT, LHS->Op,
          DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
    }
  }
  return SDValue();
}

// Matches any of the following patterns as a GORCI(W) operation
// 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
// 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
// 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
// Note that with the variant of 3.,
//     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
// the inner pattern will first be matched as GREVI and then the outer
// pattern will be matched to GORC via the first rule above.
// 4.  (or (rotl/rotr x, bitwidth/2), x)
static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
      if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
          isPowerOf2_32(Reverse.getConstantOperandVal(1)))
        return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
      // We can also form GORCI from ROTL/ROTR by half the bitwidth.
      if ((Reverse.getOpcode() == ISD::ROTL ||
           Reverse.getOpcode() == ISD::ROTR) &&
          Reverse.getOperand(0) == X &&
          isa<ConstantSDNode>(Reverse.getOperand(1))) {
        uint64_t RotAmt = Reverse.getConstantOperandVal(1);
        if (RotAmt == (VT.getSizeInBits() / 2))
          return DAG.getNode(
              RISCVISD::GORCI, DL, VT, X,
              DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
      }
      return SDValue();
    };

    // Check for either commutable permutation of (or (GREVI x, shamt), x)
    if (SDValue V = MatchOROfReverse(Op0, Op1))
      return V;
    if (SDValue V = MatchOROfReverse(Op1, Op0))
      return V;

    // OR is commutable so canonicalize its OR operand to the left
    if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
      std::swap(Op0, Op1);
    if (Op0.getOpcode() != ISD::OR)
      return SDValue();
    SDValue OrOp0 = Op0.getOperand(0);
    SDValue OrOp1 = Op0.getOperand(1);
    auto LHS = matchRISCVBitmanipPat(OrOp0);
    // OR is commutable so swap the operands and try again: x might have been
    // on the left
    if (!LHS) {
      std::swap(OrOp0, OrOp1);
      LHS = matchRISCVBitmanipPat(OrOp0);
    }
    auto RHS = matchRISCVBitmanipPat(Op1);
    if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
      return DAG.getNode(
          RISCVISD::GORCI, DL, VT, LHS->Op,
          DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
    }
  }
  return SDValue();
}

// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
  unsigned ShAmt1 = N->getConstantOperandVal(1);
  SDValue Src = N->getOperand(0);

  if (Src.getOpcode() != N->getOpcode())
    return SDValue();

  unsigned ShAmt2 = Src.getConstantOperandVal(1);
  Src = Src.getOperand(0);

  unsigned CombinedShAmt;
  if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
    CombinedShAmt = ShAmt1 | ShAmt2;
  else
    CombinedShAmt = ShAmt1 ^ ShAmt2;

  if (CombinedShAmt == 0)
    return Src;

  SDLoc DL(N);
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
                     DAG.getTargetConstant(CombinedShAmt, DL,
                                           N->getOperand(1).getValueType()));
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
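    // For example, the f64 constant 1.0 (bit pattern 0x3FF0000000000000) is
    // materialised as Lo = 0x00000000 and Hi = 0x3FF00000.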
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::ROLW:
  case RISCVISD::RORW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
        SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    break;
  }
  case RISCVISD::FSLW:
  case RISCVISD::FSRW: {
    // Only the lower 32 bits of Values and lower 6 bits of shift amount are
    // read.
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    SDValue ShAmt = N->getOperand(2);
    APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
    APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
    if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
        SimplifyDemandedBits(Op1, OpMask, DCI) ||
        SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    break;
  }
  case RISCVISD::GREVIW:
  case RISCVISD::GORCIW: {
    // Only the lower 32 bits of the first operand are read
    SDValue Op0 = N->getOperand(0);
    APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
    if (SimplifyDemandedBits(Op0, Mask, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }

    return combineGREVI_GORCI(N, DCI.DAG);
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
1548 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { 1549 assert(Op0.getOperand(0).getValueType() == MVT::i64 && 1550 "Unexpected value type!"); 1551 return Op0.getOperand(0); 1552 } 1553 1554 // This is a target-specific version of a DAGCombine performed in 1555 // DAGCombiner::visitBITCAST. It performs the equivalent of: 1556 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 1557 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 1558 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 1559 !Op0.getNode()->hasOneUse()) 1560 break; 1561 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 1562 Op0.getOperand(0)); 1563 APInt SignBit = APInt::getSignMask(32).sext(64); 1564 if (Op0.getOpcode() == ISD::FNEG) 1565 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 1566 DAG.getConstant(SignBit, DL, MVT::i64)); 1567 1568 assert(Op0.getOpcode() == ISD::FABS); 1569 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 1570 DAG.getConstant(~SignBit, DL, MVT::i64)); 1571 } 1572 case RISCVISD::GREVI: 1573 case RISCVISD::GORCI: 1574 return combineGREVI_GORCI(N, DCI.DAG); 1575 case ISD::OR: 1576 if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) 1577 return GREV; 1578 if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) 1579 return GORC; 1580 break; 1581 } 1582 1583 return SDValue(); 1584 } 1585 1586 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 1587 const SDNode *N, CombineLevel Level) const { 1588 // The following folds are only desirable if `(OP _, c1 << c2)` can be 1589 // materialised in fewer instructions than `(OP _, c1)`: 1590 // 1591 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 1592 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 1593 SDValue N0 = N->getOperand(0); 1594 EVT Ty = N0.getValueType(); 1595 if (Ty.isScalarInteger() && 1596 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 1597 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 1598 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 1599 if (C1 && C2) { 1600 APInt C1Int = C1->getAPIntValue(); 1601 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 1602 1603 // We can materialise `c1 << c2` into an add immediate, so it's "free", 1604 // and the combine should happen, to potentially allow further combines 1605 // later. 1606 if (ShiftedC1Int.getMinSignedBits() <= 64 && 1607 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 1608 return true; 1609 1610 // We can materialise `c1` in an add immediate, so it's "free", and the 1611 // combine should be prevented. 1612 if (C1Int.getMinSignedBits() <= 64 && 1613 isLegalAddImmediate(C1Int.getSExtValue())) 1614 return false; 1615 1616 // Neither constant will fit into an immediate, so find materialisation 1617 // costs. 1618 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 1619 Subtarget.is64Bit()); 1620 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 1621 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit()); 1622 1623 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 1624 // combine should be prevented. 
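// As a sketch of the remaining case: neither constant fits in a 12-bit
// immediate here, so if materialising `c1 << c2` would take more
// LUI/ADDI/SLLI steps than materialising `c1` itself, we keep the original
// `(shl (add x, c1), c2)` form so that the cheaper constant is the one that
// gets materialised.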
1625 if (C1Cost < ShiftedC1Cost) 1626 return false; 1627 } 1628 } 1629 return true; 1630 } 1631 1632 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 1633 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 1634 unsigned Depth) const { 1635 switch (Op.getOpcode()) { 1636 default: 1637 break; 1638 case RISCVISD::SLLW: 1639 case RISCVISD::SRAW: 1640 case RISCVISD::SRLW: 1641 case RISCVISD::DIVW: 1642 case RISCVISD::DIVUW: 1643 case RISCVISD::REMUW: 1644 case RISCVISD::ROLW: 1645 case RISCVISD::RORW: 1646 case RISCVISD::GREVIW: 1647 case RISCVISD::GORCIW: 1648 case RISCVISD::FSLW: 1649 case RISCVISD::FSRW: 1650 // TODO: As the result is sign-extended, this is conservatively correct. A 1651 // more precise answer could be calculated for SRAW depending on known 1652 // bits in the shift amount. 1653 return 33; 1654 } 1655 1656 return 1; 1657 } 1658 1659 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 1660 MachineBasicBlock *BB) { 1661 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 1662 1663 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 1664 // Should the count have wrapped while it was being read, we need to try 1665 // again. 1666 // ... 1667 // read: 1668 // rdcycleh x3 # load high word of cycle 1669 // rdcycle x2 # load low word of cycle 1670 // rdcycleh x4 # load high word of cycle 1671 // bne x3, x4, read # check if high word reads match, otherwise try again 1672 // ... 1673 1674 MachineFunction &MF = *BB->getParent(); 1675 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 1676 MachineFunction::iterator It = ++BB->getIterator(); 1677 1678 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 1679 MF.insert(It, LoopMBB); 1680 1681 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 1682 MF.insert(It, DoneMBB); 1683 1684 // Transfer the remainder of BB and its successor edges to DoneMBB. 
1685 DoneMBB->splice(DoneMBB->begin(), BB, 1686 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 1687 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 1688 1689 BB->addSuccessor(LoopMBB); 1690 1691 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1692 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 1693 Register LoReg = MI.getOperand(0).getReg(); 1694 Register HiReg = MI.getOperand(1).getReg(); 1695 DebugLoc DL = MI.getDebugLoc(); 1696 1697 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 1698 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 1699 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 1700 .addReg(RISCV::X0); 1701 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 1702 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 1703 .addReg(RISCV::X0); 1704 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 1705 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 1706 .addReg(RISCV::X0); 1707 1708 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 1709 .addReg(HiReg) 1710 .addReg(ReadAgainReg) 1711 .addMBB(LoopMBB); 1712 1713 LoopMBB->addSuccessor(LoopMBB); 1714 LoopMBB->addSuccessor(DoneMBB); 1715 1716 MI.eraseFromParent(); 1717 1718 return DoneMBB; 1719 } 1720 1721 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 1722 MachineBasicBlock *BB) { 1723 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 1724 1725 MachineFunction &MF = *BB->getParent(); 1726 DebugLoc DL = MI.getDebugLoc(); 1727 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 1728 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 1729 Register LoReg = MI.getOperand(0).getReg(); 1730 Register HiReg = MI.getOperand(1).getReg(); 1731 Register SrcReg = MI.getOperand(2).getReg(); 1732 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 1733 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 1734 1735 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 1736 RI); 1737 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 1738 MachineMemOperand *MMOLo = 1739 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 1740 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 1741 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 1742 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 1743 .addFrameIndex(FI) 1744 .addImm(0) 1745 .addMemOperand(MMOLo); 1746 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 1747 .addFrameIndex(FI) 1748 .addImm(4) 1749 .addMemOperand(MMOHi); 1750 MI.eraseFromParent(); // The pseudo instruction is gone now. 
1751 return BB; 1752 } 1753 1754 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 1755 MachineBasicBlock *BB) { 1756 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 1757 "Unexpected instruction"); 1758 1759 MachineFunction &MF = *BB->getParent(); 1760 DebugLoc DL = MI.getDebugLoc(); 1761 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 1762 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 1763 Register DstReg = MI.getOperand(0).getReg(); 1764 Register LoReg = MI.getOperand(1).getReg(); 1765 Register HiReg = MI.getOperand(2).getReg(); 1766 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 1767 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 1768 1769 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 1770 MachineMemOperand *MMOLo = 1771 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 1772 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 1773 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 1774 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 1775 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 1776 .addFrameIndex(FI) 1777 .addImm(0) 1778 .addMemOperand(MMOLo); 1779 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 1780 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 1781 .addFrameIndex(FI) 1782 .addImm(4) 1783 .addMemOperand(MMOHi); 1784 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 1785 MI.eraseFromParent(); // The pseudo instruction is gone now. 1786 return BB; 1787 } 1788 1789 static bool isSelectPseudo(MachineInstr &MI) { 1790 switch (MI.getOpcode()) { 1791 default: 1792 return false; 1793 case RISCV::Select_GPR_Using_CC_GPR: 1794 case RISCV::Select_FPR16_Using_CC_GPR: 1795 case RISCV::Select_FPR32_Using_CC_GPR: 1796 case RISCV::Select_FPR64_Using_CC_GPR: 1797 return true; 1798 } 1799 } 1800 1801 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 1802 MachineBasicBlock *BB) { 1803 // To "insert" Select_* instructions, we actually have to insert the triangle 1804 // control-flow pattern. The incoming instructions know the destination vreg 1805 // to set, the condition code register to branch on, the true/false values to 1806 // select between, and the condcode to use to select the appropriate branch. 1807 // 1808 // We produce the following control flow: 1809 // HeadMBB 1810 // | \ 1811 // | IfFalseMBB 1812 // | / 1813 // TailMBB 1814 // 1815 // When we find a sequence of selects we attempt to optimize their emission 1816 // by sharing the control flow. Currently we only handle cases where we have 1817 // multiple selects with the exact same condition (same LHS, RHS and CC). 1818 // The selects may be interleaved with other instructions if the other 1819 // instructions meet some requirements we deem safe: 1820 // - They are debug instructions. Otherwise, 1821 // - They do not have side-effects, do not access memory and their inputs do 1822 // not depend on the results of the select pseudo-instructions. 1823 // The TrueV/FalseV operands of the selects cannot depend on the result of 1824 // previous selects in the sequence. 1825 // These conditions could be further relaxed. See the X86 target for a 1826 // related approach and more information. 
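// As an illustration, two selects guarded by the same comparison, e.g.
//   %a = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %t1, %f1
//   %b = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %t2, %f2
// (operands shown schematically), share a single conditional branch and are
// rewritten as two PHIs in TailMBB instead of two separate triangles.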
1827 Register LHS = MI.getOperand(1).getReg(); 1828 Register RHS = MI.getOperand(2).getReg(); 1829 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 1830 1831 SmallVector<MachineInstr *, 4> SelectDebugValues; 1832 SmallSet<Register, 4> SelectDests; 1833 SelectDests.insert(MI.getOperand(0).getReg()); 1834 1835 MachineInstr *LastSelectPseudo = &MI; 1836 1837 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 1838 SequenceMBBI != E; ++SequenceMBBI) { 1839 if (SequenceMBBI->isDebugInstr()) 1840 continue; 1841 else if (isSelectPseudo(*SequenceMBBI)) { 1842 if (SequenceMBBI->getOperand(1).getReg() != LHS || 1843 SequenceMBBI->getOperand(2).getReg() != RHS || 1844 SequenceMBBI->getOperand(3).getImm() != CC || 1845 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 1846 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 1847 break; 1848 LastSelectPseudo = &*SequenceMBBI; 1849 SequenceMBBI->collectDebugValues(SelectDebugValues); 1850 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 1851 } else { 1852 if (SequenceMBBI->hasUnmodeledSideEffects() || 1853 SequenceMBBI->mayLoadOrStore()) 1854 break; 1855 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 1856 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 1857 })) 1858 break; 1859 } 1860 } 1861 1862 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 1863 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 1864 DebugLoc DL = MI.getDebugLoc(); 1865 MachineFunction::iterator I = ++BB->getIterator(); 1866 1867 MachineBasicBlock *HeadMBB = BB; 1868 MachineFunction *F = BB->getParent(); 1869 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 1870 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 1871 1872 F->insert(I, IfFalseMBB); 1873 F->insert(I, TailMBB); 1874 1875 // Transfer debug instructions associated with the selects to TailMBB. 1876 for (MachineInstr *DebugInstr : SelectDebugValues) { 1877 TailMBB->push_back(DebugInstr->removeFromParent()); 1878 } 1879 1880 // Move all instructions after the sequence to TailMBB. 1881 TailMBB->splice(TailMBB->end(), HeadMBB, 1882 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 1883 // Update machine-CFG edges by transferring all successors of the current 1884 // block to the new block which will contain the Phi nodes for the selects. 1885 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 1886 // Set the successors for HeadMBB. 1887 HeadMBB->addSuccessor(IfFalseMBB); 1888 HeadMBB->addSuccessor(TailMBB); 1889 1890 // Insert appropriate branch. 1891 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 1892 1893 BuildMI(HeadMBB, DL, TII.get(Opcode)) 1894 .addReg(LHS) 1895 .addReg(RHS) 1896 .addMBB(TailMBB); 1897 1898 // IfFalseMBB just falls through to TailMBB. 1899 IfFalseMBB->addSuccessor(TailMBB); 1900 1901 // Create PHIs for all of the select pseudo-instructions. 
1902 auto SelectMBBI = MI.getIterator(); 1903 auto SelectEnd = std::next(LastSelectPseudo->getIterator()); 1904 auto InsertionPoint = TailMBB->begin(); 1905 while (SelectMBBI != SelectEnd) { 1906 auto Next = std::next(SelectMBBI); 1907 if (isSelectPseudo(*SelectMBBI)) { 1908 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 1909 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), 1910 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) 1911 .addReg(SelectMBBI->getOperand(4).getReg()) 1912 .addMBB(HeadMBB) 1913 .addReg(SelectMBBI->getOperand(5).getReg()) 1914 .addMBB(IfFalseMBB); 1915 SelectMBBI->eraseFromParent(); 1916 } 1917 SelectMBBI = Next; 1918 } 1919 1920 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); 1921 return TailMBB; 1922 } 1923 1924 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB, 1925 int VLIndex, unsigned SEWIndex, 1926 unsigned VLMul) { 1927 MachineFunction &MF = *BB->getParent(); 1928 DebugLoc DL = MI.getDebugLoc(); 1929 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 1930 1931 unsigned SEW = MI.getOperand(SEWIndex).getImm(); 1932 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); 1933 RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8)); 1934 1935 // LMUL should already be encoded correctly. 1936 RISCVVLMUL Multiplier = static_cast<RISCVVLMUL>(VLMul); 1937 1938 MachineRegisterInfo &MRI = MF.getRegInfo(); 1939 1940 // VL and VTYPE are alive here. 1941 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)); 1942 1943 if (VLIndex >= 0) { 1944 // Set VL (rs1 != X0). 1945 Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass); 1946 MIB.addReg(DestReg, RegState::Define | RegState::Dead) 1947 .addReg(MI.getOperand(VLIndex).getReg()); 1948 } else 1949 // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0). 1950 MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead) 1951 .addReg(RISCV::X0, RegState::Kill); 1952 1953 // For simplicity we reuse the vtype representation here. 
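// For example, a pseudo with SEW=32 running at LMUL=1 gets a PseudoVSETVLI
// whose immediate corresponds roughly to "vsetvli rd, rs1, e32,m1,ta,mu",
// i.e. tail agnostic and mask undisturbed, matching the flags passed below.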
1954 MIB.addImm(RISCVVType::encodeVTYPE(Multiplier, ElementWidth,
1955 /*TailAgnostic*/ true,
1956 /*MaskAgnostic*/ false));
1957
1958 // Remove (now) redundant operands from the pseudo.
1959 MI.getOperand(SEWIndex).setImm(-1);
1960 if (VLIndex >= 0) {
1961 MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
1962 MI.getOperand(VLIndex).setIsKill(false);
1963 }
1964
1965 return BB;
1966 }
1967
1968 MachineBasicBlock *
1969 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1970 MachineBasicBlock *BB) const {
1971
1972 if (const RISCVVPseudosTable::PseudoInfo *RVV =
1973 RISCVVPseudosTable::getPseudoInfo(MI.getOpcode())) {
1974 int VLIndex = RVV->getVLIndex();
1975 int SEWIndex = RVV->getSEWIndex();
1976
1977 assert(SEWIndex >= 0 && "SEWIndex must be >= 0");
1978 return addVSetVL(MI, BB, VLIndex, SEWIndex, RVV->VLMul);
1979 }
1980
1981 switch (MI.getOpcode()) {
1982 default:
1983 llvm_unreachable("Unexpected instr type to insert");
1984 case RISCV::ReadCycleWide:
1985 assert(!Subtarget.is64Bit() &&
1986 "ReadCycleWide is only to be used on riscv32");
1987 return emitReadCycleWidePseudo(MI, BB);
1988 case RISCV::Select_GPR_Using_CC_GPR:
1989 case RISCV::Select_FPR16_Using_CC_GPR:
1990 case RISCV::Select_FPR32_Using_CC_GPR:
1991 case RISCV::Select_FPR64_Using_CC_GPR:
1992 return emitSelectPseudo(MI, BB);
1993 case RISCV::BuildPairF64Pseudo:
1994 return emitBuildPairF64Pseudo(MI, BB);
1995 case RISCV::SplitF64Pseudo:
1996 return emitSplitF64Pseudo(MI, BB);
1997 }
1998 }
1999
2000 // Calling Convention Implementation.
2001 // The expectations for frontend ABI lowering vary from target to target.
2002 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
2003 // details, but this is a longer term goal. For now, we simply try to keep the
2004 // role of the frontend as simple and well-defined as possible. The rules can
2005 // be summarised as:
2006 // * Never split up large scalar arguments. We handle them here.
2007 // * If a hardfloat calling convention is being used, and the struct may be
2008 // passed in a pair of registers (fp+fp, int+fp), and both registers are
2009 // available, then pass as two separate arguments. If either the GPRs or FPRs
2010 // are exhausted, then pass according to the rule below.
2011 // * If a struct could never be passed in registers or directly in a stack
2012 // slot (as it is larger than 2*XLEN and the floating point rules don't
2013 // apply), then pass it using a pointer with the byval attribute.
2014 // * If a struct is no larger than 2*XLEN, then coerce to either a two-element
2015 // word-sized array or a 2*XLEN scalar (depending on alignment).
2016 // * The frontend can determine whether a struct is returned by reference or
2017 // not based on its size and fields. If it will be returned by reference, the
2018 // frontend must modify the prototype so a pointer with the sret annotation is
2019 // passed as the first argument. This is not necessary for large scalar
2020 // returns.
2021 // * Struct return values and varargs should be coerced to structs containing
2022 // register-size fields in the same situations they would be for fixed
2023 // arguments.
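// For example, under the hard-float ILP32D ABI a struct containing two
// doubles is unpacked by the frontend and passed as two separate f64
// arguments while two FPRs are still available; once the relevant FPRs (or
// GPRs, for the int+fp case) run out, it falls back to the integer rules
// above.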
2024 2025 static const MCPhysReg ArgGPRs[] = { 2026 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 2027 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 2028 }; 2029 static const MCPhysReg ArgFPR16s[] = { 2030 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 2031 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 2032 }; 2033 static const MCPhysReg ArgFPR32s[] = { 2034 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 2035 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 2036 }; 2037 static const MCPhysReg ArgFPR64s[] = { 2038 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 2039 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 2040 }; 2041 2042 // Pass a 2*XLEN argument that has been split into two XLEN values through 2043 // registers or the stack as necessary. 2044 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 2045 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 2046 MVT ValVT2, MVT LocVT2, 2047 ISD::ArgFlagsTy ArgFlags2) { 2048 unsigned XLenInBytes = XLen / 8; 2049 if (Register Reg = State.AllocateReg(ArgGPRs)) { 2050 // At least one half can be passed via register. 2051 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 2052 VA1.getLocVT(), CCValAssign::Full)); 2053 } else { 2054 // Both halves must be passed on the stack, with proper alignment. 2055 Align StackAlign = 2056 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 2057 State.addLoc( 2058 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 2059 State.AllocateStack(XLenInBytes, StackAlign), 2060 VA1.getLocVT(), CCValAssign::Full)); 2061 State.addLoc(CCValAssign::getMem( 2062 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 2063 LocVT2, CCValAssign::Full)); 2064 return false; 2065 } 2066 2067 if (Register Reg = State.AllocateReg(ArgGPRs)) { 2068 // The second half can also be passed via register. 2069 State.addLoc( 2070 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 2071 } else { 2072 // The second half is passed via the stack, without additional alignment. 2073 State.addLoc(CCValAssign::getMem( 2074 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 2075 LocVT2, CCValAssign::Full)); 2076 } 2077 2078 return false; 2079 } 2080 2081 // Implements the RISC-V calling convention. Returns true upon failure. 2082 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, 2083 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, 2084 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, 2085 bool IsRet, Type *OrigTy) { 2086 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); 2087 assert(XLen == 32 || XLen == 64); 2088 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; 2089 2090 // Any return value split in to more than two values can't be returned 2091 // directly. 2092 if (IsRet && ValNo > 1) 2093 return true; 2094 2095 // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a 2096 // variadic argument, or if no F16/F32 argument registers are available. 2097 bool UseGPRForF16_F32 = true; 2098 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a 2099 // variadic argument, or if no F64 argument registers are available. 
2100 bool UseGPRForF64 = true; 2101 2102 switch (ABI) { 2103 default: 2104 llvm_unreachable("Unexpected ABI"); 2105 case RISCVABI::ABI_ILP32: 2106 case RISCVABI::ABI_LP64: 2107 break; 2108 case RISCVABI::ABI_ILP32F: 2109 case RISCVABI::ABI_LP64F: 2110 UseGPRForF16_F32 = !IsFixed; 2111 break; 2112 case RISCVABI::ABI_ILP32D: 2113 case RISCVABI::ABI_LP64D: 2114 UseGPRForF16_F32 = !IsFixed; 2115 UseGPRForF64 = !IsFixed; 2116 break; 2117 } 2118 2119 // FPR16, FPR32, and FPR64 alias each other. 2120 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { 2121 UseGPRForF16_F32 = true; 2122 UseGPRForF64 = true; 2123 } 2124 2125 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and 2126 // similar local variables rather than directly checking against the target 2127 // ABI. 2128 2129 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { 2130 LocVT = XLenVT; 2131 LocInfo = CCValAssign::BCvt; 2132 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { 2133 LocVT = MVT::i64; 2134 LocInfo = CCValAssign::BCvt; 2135 } 2136 2137 // If this is a variadic argument, the RISC-V calling convention requires 2138 // that it is assigned an 'even' or 'aligned' register if it has 8-byte 2139 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should 2140 // be used regardless of whether the original argument was split during 2141 // legalisation or not. The argument will not be passed by registers if the 2142 // original type is larger than 2*XLEN, so the register alignment rule does 2143 // not apply. 2144 unsigned TwoXLenInBytes = (2 * XLen) / 8; 2145 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && 2146 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { 2147 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); 2148 // Skip 'odd' register if necessary. 2149 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) 2150 State.AllocateReg(ArgGPRs); 2151 } 2152 2153 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); 2154 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = 2155 State.getPendingArgFlags(); 2156 2157 assert(PendingLocs.size() == PendingArgFlags.size() && 2158 "PendingLocs and PendingArgFlags out of sync"); 2159 2160 // Handle passing f64 on RV32D with a soft float ABI or when floating point 2161 // registers are exhausted. 2162 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 2163 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 2164 "Can't lower f64 if it is split"); 2165 // Depending on available argument GPRS, f64 may be passed in a pair of 2166 // GPRs, split between a GPR and the stack, or passed completely on the 2167 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 2168 // cases. 2169 Register Reg = State.AllocateReg(ArgGPRs); 2170 LocVT = MVT::i32; 2171 if (!Reg) { 2172 unsigned StackOffset = State.AllocateStack(8, Align(8)); 2173 State.addLoc( 2174 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 2175 return false; 2176 } 2177 if (!State.AllocateReg(ArgGPRs)) 2178 State.AllocateStack(4, Align(4)); 2179 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2180 return false; 2181 } 2182 2183 // Split arguments might be passed indirectly, so keep track of the pending 2184 // values. 
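// For example, an i128 argument on RV64 arrives as two i64 pieces that are
// gathered here and then assigned together by CC_RISCVAssign2XLen, while on
// RV32 the same i128 becomes four i32 pieces (more than two pending values)
// and is instead passed indirectly via a pointer.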
2185 if (ArgFlags.isSplit() || !PendingLocs.empty()) { 2186 LocVT = XLenVT; 2187 LocInfo = CCValAssign::Indirect; 2188 PendingLocs.push_back( 2189 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 2190 PendingArgFlags.push_back(ArgFlags); 2191 if (!ArgFlags.isSplitEnd()) { 2192 return false; 2193 } 2194 } 2195 2196 // If the split argument only had two elements, it should be passed directly 2197 // in registers or on the stack. 2198 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 2199 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 2200 // Apply the normal calling convention rules to the first half of the 2201 // split argument. 2202 CCValAssign VA = PendingLocs[0]; 2203 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 2204 PendingLocs.clear(); 2205 PendingArgFlags.clear(); 2206 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 2207 ArgFlags); 2208 } 2209 2210 // Allocate to a register if possible, or else a stack slot. 2211 Register Reg; 2212 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 2213 Reg = State.AllocateReg(ArgFPR16s); 2214 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 2215 Reg = State.AllocateReg(ArgFPR32s); 2216 else if (ValVT == MVT::f64 && !UseGPRForF64) 2217 Reg = State.AllocateReg(ArgFPR64s); 2218 else 2219 Reg = State.AllocateReg(ArgGPRs); 2220 unsigned StackOffset = 2221 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); 2222 2223 // If we reach this point and PendingLocs is non-empty, we must be at the 2224 // end of a split argument that must be passed indirectly. 2225 if (!PendingLocs.empty()) { 2226 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 2227 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 2228 2229 for (auto &It : PendingLocs) { 2230 if (Reg) 2231 It.convertToReg(Reg); 2232 else 2233 It.convertToMem(StackOffset); 2234 State.addLoc(It); 2235 } 2236 PendingLocs.clear(); 2237 PendingArgFlags.clear(); 2238 return false; 2239 } 2240 2241 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT) && 2242 "Expected an XLenVT at this stage"); 2243 2244 if (Reg) { 2245 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2246 return false; 2247 } 2248 2249 // When a floating-point value is passed on the stack, no bit-conversion is 2250 // needed. 
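// For example, an f32 that would have been bit-cast to an XLen integer for a
// GPR is instead recorded with LocVT == f32 (no BCvt) when it ends up in a
// stack slot, so it is simply stored and reloaded as a float.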
2251 if (ValVT.isFloatingPoint()) { 2252 LocVT = ValVT; 2253 LocInfo = CCValAssign::Full; 2254 } 2255 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 2256 return false; 2257 } 2258 2259 void RISCVTargetLowering::analyzeInputArgs( 2260 MachineFunction &MF, CCState &CCInfo, 2261 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 2262 unsigned NumArgs = Ins.size(); 2263 FunctionType *FType = MF.getFunction().getFunctionType(); 2264 2265 for (unsigned i = 0; i != NumArgs; ++i) { 2266 MVT ArgVT = Ins[i].VT; 2267 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 2268 2269 Type *ArgTy = nullptr; 2270 if (IsRet) 2271 ArgTy = FType->getReturnType(); 2272 else if (Ins[i].isOrigArg()) 2273 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 2274 2275 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2276 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 2277 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) { 2278 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 2279 << EVT(ArgVT).getEVTString() << '\n'); 2280 llvm_unreachable(nullptr); 2281 } 2282 } 2283 } 2284 2285 void RISCVTargetLowering::analyzeOutputArgs( 2286 MachineFunction &MF, CCState &CCInfo, 2287 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 2288 CallLoweringInfo *CLI) const { 2289 unsigned NumArgs = Outs.size(); 2290 2291 for (unsigned i = 0; i != NumArgs; i++) { 2292 MVT ArgVT = Outs[i].VT; 2293 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2294 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 2295 2296 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2297 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 2298 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) { 2299 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 2300 << EVT(ArgVT).getEVTString() << "\n"); 2301 llvm_unreachable(nullptr); 2302 } 2303 } 2304 } 2305 2306 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 2307 // values. 2308 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 2309 const CCValAssign &VA, const SDLoc &DL) { 2310 switch (VA.getLocInfo()) { 2311 default: 2312 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2313 case CCValAssign::Full: 2314 break; 2315 case CCValAssign::BCvt: 2316 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 2317 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 2318 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 2319 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 2320 else 2321 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2322 break; 2323 } 2324 return Val; 2325 } 2326 2327 // The caller is responsible for loading the full value if the argument is 2328 // passed with CCValAssign::Indirect. 
2329 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 2330 const CCValAssign &VA, const SDLoc &DL) { 2331 MachineFunction &MF = DAG.getMachineFunction(); 2332 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 2333 EVT LocVT = VA.getLocVT(); 2334 SDValue Val; 2335 const TargetRegisterClass *RC; 2336 2337 switch (LocVT.getSimpleVT().SimpleTy) { 2338 default: 2339 llvm_unreachable("Unexpected register type"); 2340 case MVT::i32: 2341 case MVT::i64: 2342 RC = &RISCV::GPRRegClass; 2343 break; 2344 case MVT::f16: 2345 RC = &RISCV::FPR16RegClass; 2346 break; 2347 case MVT::f32: 2348 RC = &RISCV::FPR32RegClass; 2349 break; 2350 case MVT::f64: 2351 RC = &RISCV::FPR64RegClass; 2352 break; 2353 } 2354 2355 Register VReg = RegInfo.createVirtualRegister(RC); 2356 RegInfo.addLiveIn(VA.getLocReg(), VReg); 2357 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 2358 2359 if (VA.getLocInfo() == CCValAssign::Indirect) 2360 return Val; 2361 2362 return convertLocVTToValVT(DAG, Val, VA, DL); 2363 } 2364 2365 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 2366 const CCValAssign &VA, const SDLoc &DL) { 2367 EVT LocVT = VA.getLocVT(); 2368 2369 switch (VA.getLocInfo()) { 2370 default: 2371 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2372 case CCValAssign::Full: 2373 break; 2374 case CCValAssign::BCvt: 2375 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 2376 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 2377 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 2378 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 2379 else 2380 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 2381 break; 2382 } 2383 return Val; 2384 } 2385 2386 // The caller is responsible for loading the full value if the argument is 2387 // passed with CCValAssign::Indirect. 2388 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 2389 const CCValAssign &VA, const SDLoc &DL) { 2390 MachineFunction &MF = DAG.getMachineFunction(); 2391 MachineFrameInfo &MFI = MF.getFrameInfo(); 2392 EVT LocVT = VA.getLocVT(); 2393 EVT ValVT = VA.getValVT(); 2394 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 2395 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, 2396 VA.getLocMemOffset(), /*Immutable=*/true); 2397 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2398 SDValue Val; 2399 2400 ISD::LoadExtType ExtType; 2401 switch (VA.getLocInfo()) { 2402 default: 2403 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2404 case CCValAssign::Full: 2405 case CCValAssign::Indirect: 2406 case CCValAssign::BCvt: 2407 ExtType = ISD::NON_EXTLOAD; 2408 break; 2409 } 2410 Val = DAG.getExtLoad( 2411 ExtType, DL, LocVT, Chain, FIN, 2412 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 2413 return Val; 2414 } 2415 2416 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 2417 const CCValAssign &VA, const SDLoc &DL) { 2418 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 2419 "Unexpected VA"); 2420 MachineFunction &MF = DAG.getMachineFunction(); 2421 MachineFrameInfo &MFI = MF.getFrameInfo(); 2422 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 2423 2424 if (VA.isMemLoc()) { 2425 // f64 is passed on the stack. 
2426 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
2427 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2428 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
2429 MachinePointerInfo::getFixedStack(MF, FI));
2430 }
2431
2432 assert(VA.isRegLoc() && "Expected register VA assignment");
2433
2434 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2435 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
2436 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
2437 SDValue Hi;
2438 if (VA.getLocReg() == RISCV::X17) {
2439 // Second half of f64 is passed on the stack.
2440 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
2441 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2442 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
2443 MachinePointerInfo::getFixedStack(MF, FI));
2444 } else {
2445 // Second half of f64 is passed in another GPR.
2446 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2447 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
2448 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
2449 }
2450 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
2451 }
2452
2453 // FastCC gives less than a 1% performance improvement on some particular
2454 // benchmarks, but it may theoretically benefit some cases.
2455 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
2456 CCValAssign::LocInfo LocInfo,
2457 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2458
2459 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2460 // X5 and X6 might be used for save-restore libcall.
2461 static const MCPhysReg GPRList[] = {
2462 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
2463 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
2464 RISCV::X29, RISCV::X30, RISCV::X31};
2465 if (unsigned Reg = State.AllocateReg(GPRList)) {
2466 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2467 return false;
2468 }
2469 }
2470
2471 if (LocVT == MVT::f16) {
2472 static const MCPhysReg FPR16List[] = {
2473 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
2474 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
2475 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
2476 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
2477 if (unsigned Reg = State.AllocateReg(FPR16List)) {
2478 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2479 return false;
2480 }
2481 }
2482
2483 if (LocVT == MVT::f32) {
2484 static const MCPhysReg FPR32List[] = {
2485 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
2486 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
2487 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
2488 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
2489 if (unsigned Reg = State.AllocateReg(FPR32List)) {
2490 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2491 return false;
2492 }
2493 }
2494
2495 if (LocVT == MVT::f64) {
2496 static const MCPhysReg FPR64List[] = {
2497 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
2498 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
2499 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
2500 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
2501 if (unsigned Reg = State.AllocateReg(FPR64List)) {
2502 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2503 return
false; 2504 } 2505 } 2506 2507 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 2508 unsigned Offset4 = State.AllocateStack(4, Align(4)); 2509 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 2510 return false; 2511 } 2512 2513 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 2514 unsigned Offset5 = State.AllocateStack(8, Align(8)); 2515 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 2516 return false; 2517 } 2518 2519 return true; // CC didn't match. 2520 } 2521 2522 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 2523 CCValAssign::LocInfo LocInfo, 2524 ISD::ArgFlagsTy ArgFlags, CCState &State) { 2525 2526 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 2527 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 2528 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 2529 static const MCPhysReg GPRList[] = { 2530 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 2531 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 2532 if (unsigned Reg = State.AllocateReg(GPRList)) { 2533 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2534 return false; 2535 } 2536 } 2537 2538 if (LocVT == MVT::f32) { 2539 // Pass in STG registers: F1, ..., F6 2540 // fs0 ... fs5 2541 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, 2542 RISCV::F18_F, RISCV::F19_F, 2543 RISCV::F20_F, RISCV::F21_F}; 2544 if (unsigned Reg = State.AllocateReg(FPR32List)) { 2545 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2546 return false; 2547 } 2548 } 2549 2550 if (LocVT == MVT::f64) { 2551 // Pass in STG registers: D1, ..., D6 2552 // fs6 ... fs11 2553 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, 2554 RISCV::F24_D, RISCV::F25_D, 2555 RISCV::F26_D, RISCV::F27_D}; 2556 if (unsigned Reg = State.AllocateReg(FPR64List)) { 2557 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2558 return false; 2559 } 2560 } 2561 2562 report_fatal_error("No registers left in GHC calling convention"); 2563 return true; 2564 } 2565 2566 // Transform physical registers into virtual registers. 
2567 SDValue RISCVTargetLowering::LowerFormalArguments(
2568 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
2569 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2570 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2571
2572 MachineFunction &MF = DAG.getMachineFunction();
2573
2574 switch (CallConv) {
2575 default:
2576 report_fatal_error("Unsupported calling convention");
2577 case CallingConv::C:
2578 case CallingConv::Fast:
2579 break;
2580 case CallingConv::GHC:
2581 if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
2582 !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
2583 report_fatal_error(
2584 "GHC calling convention requires the F and D instruction set extensions");
2585 }
2586
2587 const Function &Func = MF.getFunction();
2588 if (Func.hasFnAttribute("interrupt")) {
2589 if (!Func.arg_empty())
2590 report_fatal_error(
2591 "Functions with the interrupt attribute cannot have arguments!");
2592
2593 StringRef Kind =
2594 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2595
2596 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
2597 report_fatal_error(
2598 "Function interrupt attribute argument not supported!");
2599 }
2600
2601 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2602 MVT XLenVT = Subtarget.getXLenVT();
2603 unsigned XLenInBytes = Subtarget.getXLen() / 8;
2604 // Used with varargs to accumulate store chains.
2605 std::vector<SDValue> OutChains;
2606
2607 // Assign locations to all of the incoming arguments.
2608 SmallVector<CCValAssign, 16> ArgLocs;
2609 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2610
2611 if (CallConv == CallingConv::Fast)
2612 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2613 else if (CallConv == CallingConv::GHC)
2614 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
2615 else
2616 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2617
2618 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2619 CCValAssign &VA = ArgLocs[i];
2620 SDValue ArgValue;
2621 // Passing f64 on RV32D with a soft float ABI must be handled as a special
2622 // case.
2623 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2624 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2625 else if (VA.isRegLoc())
2626 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
2627 else
2628 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2629
2630 if (VA.getLocInfo() == CCValAssign::Indirect) {
2631 // If the original argument was split and passed by reference (e.g. i128
2632 // on RV32), we need to load all parts of it here (using the same
2633 // address).
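// For example, an i128 argument on RV32 arrives as a single pointer in a GPR;
// the load just below reads the first i32 part at offset 0 and the loop that
// follows loads the remaining parts at offsets 4, 8 and 12 from the same
// address.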
2634 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2635 MachinePointerInfo()));
2636 unsigned ArgIndex = Ins[i].OrigArgIndex;
2637 assert(Ins[i].PartOffset == 0);
2638 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2639 CCValAssign &PartVA = ArgLocs[i + 1];
2640 unsigned PartOffset = Ins[i + 1].PartOffset;
2641 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2642 DAG.getIntPtrConstant(PartOffset, DL));
2643 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2644 MachinePointerInfo()));
2645 ++i;
2646 }
2647 continue;
2648 }
2649 InVals.push_back(ArgValue);
2650 }
2651
2652 if (IsVarArg) {
2653 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2654 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2655 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2656 MachineFrameInfo &MFI = MF.getFrameInfo();
2657 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2658 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2659
2660 // Offset of the first variable argument from stack pointer, and size of
2661 // the vararg save area. For now, the varargs save area is either zero or
2662 // large enough to hold a0-a7.
2663 int VaArgOffset, VarArgsSaveSize;
2664
2665 // If all registers are allocated, then all varargs must be passed on the
2666 // stack and we don't need to save any argregs.
2667 if (ArgRegs.size() == Idx) {
2668 VaArgOffset = CCInfo.getNextStackOffset();
2669 VarArgsSaveSize = 0;
2670 } else {
2671 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2672 VaArgOffset = -VarArgsSaveSize;
2673 }
2674
2675 // Record the frame index of the first variable argument
2676 // which is a value needed by VASTART.
2677 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2678 RVFI->setVarArgsFrameIndex(FI);
2679
2680 // If saving an odd number of registers then create an extra stack slot to
2681 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2682 // offsets to even-numbered registers remain 2*XLEN-aligned.
2683 if (Idx % 2) {
2684 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2685 VarArgsSaveSize += XLenInBytes;
2686 }
2687
2688 // Copy the integer registers that may have been used for passing varargs
2689 // to the vararg save area.
2690 for (unsigned I = Idx; I < ArgRegs.size();
2691 ++I, VaArgOffset += XLenInBytes) {
2692 const Register Reg = RegInfo.createVirtualRegister(RC);
2693 RegInfo.addLiveIn(ArgRegs[I], Reg);
2694 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2695 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2696 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2697 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2698 MachinePointerInfo::getFixedStack(MF, FI));
2699 cast<StoreSDNode>(Store.getNode())
2700 ->getMemOperand()
2701 ->setValue((Value *)nullptr);
2702 OutChains.push_back(Store);
2703 }
2704 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2705 }
2706
2707 // All stores are grouped in one node to allow the matching between
2708 // the size of Ins and InVals. This only happens for vararg functions.
2709 if (!OutChains.empty()) {
2710 OutChains.push_back(Chain);
2711 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2712 }
2713
2714 return Chain;
2715 }
2716
2717 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2718 /// for tail call optimization.
2719 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. 2720 bool RISCVTargetLowering::isEligibleForTailCallOptimization( 2721 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, 2722 const SmallVector<CCValAssign, 16> &ArgLocs) const { 2723 2724 auto &Callee = CLI.Callee; 2725 auto CalleeCC = CLI.CallConv; 2726 auto &Outs = CLI.Outs; 2727 auto &Caller = MF.getFunction(); 2728 auto CallerCC = Caller.getCallingConv(); 2729 2730 // Exception-handling functions need a special set of instructions to 2731 // indicate a return to the hardware. Tail-calling another function would 2732 // probably break this. 2733 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This 2734 // should be expanded as new function attributes are introduced. 2735 if (Caller.hasFnAttribute("interrupt")) 2736 return false; 2737 2738 // Do not tail call opt if the stack is used to pass parameters. 2739 if (CCInfo.getNextStackOffset() != 0) 2740 return false; 2741 2742 // Do not tail call opt if any parameters need to be passed indirectly. 2743 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 2744 // passed indirectly. So the address of the value will be passed in a 2745 // register, or if not available, then the address is put on the stack. In 2746 // order to pass indirectly, space on the stack often needs to be allocated 2747 // in order to store the value. In this case the CCInfo.getNextStackOffset() 2748 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 2749 // are passed CCValAssign::Indirect. 2750 for (auto &VA : ArgLocs) 2751 if (VA.getLocInfo() == CCValAssign::Indirect) 2752 return false; 2753 2754 // Do not tail call opt if either caller or callee uses struct return 2755 // semantics. 2756 auto IsCallerStructRet = Caller.hasStructRetAttr(); 2757 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 2758 if (IsCallerStructRet || IsCalleeStructRet) 2759 return false; 2760 2761 // Externally-defined functions with weak linkage should not be 2762 // tail-called. The behaviour of branch instructions in this situation (as 2763 // used for tail calls) is implementation-defined, so we cannot rely on the 2764 // linker replacing the tail call with a return. 2765 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 2766 const GlobalValue *GV = G->getGlobal(); 2767 if (GV->hasExternalWeakLinkage()) 2768 return false; 2769 } 2770 2771 // The callee has to preserve all registers the caller needs to preserve. 2772 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2773 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 2774 if (CalleeCC != CallerCC) { 2775 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 2776 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 2777 return false; 2778 } 2779 2780 // Byval parameters hand the function a pointer directly into the stack area 2781 // we want to reuse during a tail call. Working around this *is* possible 2782 // but less efficient and uglier in LowerCall. 2783 for (auto &Arg : Outs) 2784 if (Arg.Flags.isByVal()) 2785 return false; 2786 2787 return true; 2788 } 2789 2790 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 2791 // and output parameter nodes. 
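// Roughly, for a non-tail call this builds: local copies for byval arguments,
// callseq_start, stores for stack-passed arguments plus CopyToReg nodes for
// register arguments, a RISCVISD::CALL glued to those copies, callseq_end,
// and finally CopyFromReg nodes for each value returned in registers. A tail
// call instead ends in RISCVISD::TAIL and omits the callseq markers and
// result copies.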
2792 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 2793 SmallVectorImpl<SDValue> &InVals) const { 2794 SelectionDAG &DAG = CLI.DAG; 2795 SDLoc &DL = CLI.DL; 2796 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 2797 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 2798 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 2799 SDValue Chain = CLI.Chain; 2800 SDValue Callee = CLI.Callee; 2801 bool &IsTailCall = CLI.IsTailCall; 2802 CallingConv::ID CallConv = CLI.CallConv; 2803 bool IsVarArg = CLI.IsVarArg; 2804 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2805 MVT XLenVT = Subtarget.getXLenVT(); 2806 2807 MachineFunction &MF = DAG.getMachineFunction(); 2808 2809 // Analyze the operands of the call, assigning locations to each operand. 2810 SmallVector<CCValAssign, 16> ArgLocs; 2811 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 2812 2813 if (CallConv == CallingConv::Fast) 2814 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); 2815 else if (CallConv == CallingConv::GHC) 2816 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 2817 else 2818 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 2819 2820 // Check if it's really possible to do a tail call. 2821 if (IsTailCall) 2822 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 2823 2824 if (IsTailCall) 2825 ++NumTailCalls; 2826 else if (CLI.CB && CLI.CB->isMustTailCall()) 2827 report_fatal_error("failed to perform tail call elimination on a call " 2828 "site marked musttail"); 2829 2830 // Get a count of how many bytes are to be pushed on the stack. 2831 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 2832 2833 // Create local copies for byval args 2834 SmallVector<SDValue, 8> ByValArgs; 2835 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 2836 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2837 if (!Flags.isByVal()) 2838 continue; 2839 2840 SDValue Arg = OutVals[i]; 2841 unsigned Size = Flags.getByValSize(); 2842 Align Alignment = Flags.getNonZeroByValAlign(); 2843 2844 int FI = 2845 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 2846 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 2847 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 2848 2849 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 2850 /*IsVolatile=*/false, 2851 /*AlwaysInline=*/false, IsTailCall, 2852 MachinePointerInfo(), MachinePointerInfo()); 2853 ByValArgs.push_back(FIPtr); 2854 } 2855 2856 if (!IsTailCall) 2857 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 2858 2859 // Copy argument values to their designated locations. 2860 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 2861 SmallVector<SDValue, 8> MemOpChains; 2862 SDValue StackPtr; 2863 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 2864 CCValAssign &VA = ArgLocs[i]; 2865 SDValue ArgValue = OutVals[i]; 2866 ISD::ArgFlagsTy Flags = Outs[i].Flags; 2867 2868 // Handle passing f64 on RV32D with a soft float ABI as a special case. 2869 bool IsF64OnRV32DSoftABI = 2870 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 2871 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 2872 SDValue SplitF64 = DAG.getNode( 2873 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 2874 SDValue Lo = SplitF64.getValue(0); 2875 SDValue Hi = SplitF64.getValue(1); 2876 2877 Register RegLo = VA.getLocReg(); 2878 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 2879 2880 if (RegLo == RISCV::X17) { 2881 // Second half of f64 is passed on the stack. 
2882 // Work out the address of the stack slot. 2883 if (!StackPtr.getNode()) 2884 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 2885 // Emit the store. 2886 MemOpChains.push_back( 2887 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 2888 } else { 2889 // Second half of f64 is passed in another GPR. 2890 assert(RegLo < RISCV::X31 && "Invalid register pair"); 2891 Register RegHigh = RegLo + 1; 2892 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 2893 } 2894 continue; 2895 } 2896 2897 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 2898 // as any other MemLoc. 2899 2900 // Promote the value if needed. 2901 // For now, only handle fully promoted and indirect arguments. 2902 if (VA.getLocInfo() == CCValAssign::Indirect) { 2903 // Store the argument in a stack slot and pass its address. 2904 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 2905 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 2906 MemOpChains.push_back( 2907 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 2908 MachinePointerInfo::getFixedStack(MF, FI))); 2909 // If the original argument was split (e.g. i128), we need 2910 // to store all parts of it here (and pass just one address). 2911 unsigned ArgIndex = Outs[i].OrigArgIndex; 2912 assert(Outs[i].PartOffset == 0); 2913 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 2914 SDValue PartValue = OutVals[i + 1]; 2915 unsigned PartOffset = Outs[i + 1].PartOffset; 2916 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 2917 DAG.getIntPtrConstant(PartOffset, DL)); 2918 MemOpChains.push_back( 2919 DAG.getStore(Chain, DL, PartValue, Address, 2920 MachinePointerInfo::getFixedStack(MF, FI))); 2921 ++i; 2922 } 2923 ArgValue = SpillSlot; 2924 } else { 2925 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL); 2926 } 2927 2928 // Use local copy if it is a byval arg. 2929 if (Flags.isByVal()) 2930 ArgValue = ByValArgs[j++]; 2931 2932 if (VA.isRegLoc()) { 2933 // Queue up the argument copies and emit them at the end. 2934 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 2935 } else { 2936 assert(VA.isMemLoc() && "Argument not register or memory"); 2937 assert(!IsTailCall && "Tail call not allowed if stack is used " 2938 "for passing parameters"); 2939 2940 // Work out the address of the stack slot. 2941 if (!StackPtr.getNode()) 2942 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 2943 SDValue Address = 2944 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 2945 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 2946 2947 // Emit the store. 2948 MemOpChains.push_back( 2949 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 2950 } 2951 } 2952 2953 // Join the stores, which are independent of one another. 2954 if (!MemOpChains.empty()) 2955 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 2956 2957 SDValue Glue; 2958 2959 // Build a sequence of copy-to-reg nodes, chained and glued together. 2960 for (auto &Reg : RegsToPass) { 2961 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 2962 Glue = Chain.getValue(1); 2963 } 2964 2965 // Validate that none of the argument registers have been marked as 2966 // reserved, if so report an error. Do the same for the return address if this 2967 // is not a tailcall. 
2968 validateCCReservedRegs(RegsToPass, MF); 2969 if (!IsTailCall && 2970 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 2971 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 2972 MF.getFunction(), 2973 "Return address register required, but has been reserved."}); 2974 2975 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 2976 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 2977 // split it and then direct call can be matched by PseudoCALL. 2978 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 2979 const GlobalValue *GV = S->getGlobal(); 2980 2981 unsigned OpFlags = RISCVII::MO_CALL; 2982 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 2983 OpFlags = RISCVII::MO_PLT; 2984 2985 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 2986 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 2987 unsigned OpFlags = RISCVII::MO_CALL; 2988 2989 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 2990 nullptr)) 2991 OpFlags = RISCVII::MO_PLT; 2992 2993 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 2994 } 2995 2996 // The first call operand is the chain and the second is the target address. 2997 SmallVector<SDValue, 8> Ops; 2998 Ops.push_back(Chain); 2999 Ops.push_back(Callee); 3000 3001 // Add argument registers to the end of the list so that they are 3002 // known live into the call. 3003 for (auto &Reg : RegsToPass) 3004 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 3005 3006 if (!IsTailCall) { 3007 // Add a register mask operand representing the call-preserved registers. 3008 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 3009 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 3010 assert(Mask && "Missing call preserved mask for calling convention"); 3011 Ops.push_back(DAG.getRegisterMask(Mask)); 3012 } 3013 3014 // Glue the call to the argument copies, if any. 3015 if (Glue.getNode()) 3016 Ops.push_back(Glue); 3017 3018 // Emit the call. 3019 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 3020 3021 if (IsTailCall) { 3022 MF.getFrameInfo().setHasTailCall(); 3023 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 3024 } 3025 3026 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 3027 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 3028 Glue = Chain.getValue(1); 3029 3030 // Mark the end of the call, which is glued to the call itself. 3031 Chain = DAG.getCALLSEQ_END(Chain, 3032 DAG.getConstant(NumBytes, DL, PtrVT, true), 3033 DAG.getConstant(0, DL, PtrVT, true), 3034 Glue, DL); 3035 Glue = Chain.getValue(1); 3036 3037 // Assign locations to each value returned by this call. 3038 SmallVector<CCValAssign, 16> RVLocs; 3039 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 3040 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 3041 3042 // Copy all of the result registers out of their specified physreg. 
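// For example, an f64 returned under an RV32 ABI where it travels in GPRs
// comes back in the a0/a1 pair (X10/X11); the two i32 halves are copied out
// below and recombined with RISCVISD::BuildPairF64.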
3043 for (auto &VA : RVLocs) { 3044 // Copy the value out 3045 SDValue RetValue = 3046 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 3047 // Glue the RetValue to the end of the call sequence 3048 Chain = RetValue.getValue(1); 3049 Glue = RetValue.getValue(2); 3050 3051 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 3052 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 3053 SDValue RetValue2 = 3054 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 3055 Chain = RetValue2.getValue(1); 3056 Glue = RetValue2.getValue(2); 3057 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 3058 RetValue2); 3059 } 3060 3061 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); 3062 3063 InVals.push_back(RetValue); 3064 } 3065 3066 return Chain; 3067 } 3068 3069 bool RISCVTargetLowering::CanLowerReturn( 3070 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 3071 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 3072 SmallVector<CCValAssign, 16> RVLocs; 3073 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 3074 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 3075 MVT VT = Outs[i].VT; 3076 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 3077 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 3078 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 3079 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr)) 3080 return false; 3081 } 3082 return true; 3083 } 3084 3085 SDValue 3086 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 3087 bool IsVarArg, 3088 const SmallVectorImpl<ISD::OutputArg> &Outs, 3089 const SmallVectorImpl<SDValue> &OutVals, 3090 const SDLoc &DL, SelectionDAG &DAG) const { 3091 const MachineFunction &MF = DAG.getMachineFunction(); 3092 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 3093 3094 // Stores the assignment of the return value to a location. 3095 SmallVector<CCValAssign, 16> RVLocs; 3096 3097 // Info about the registers and stack slot. 3098 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 3099 *DAG.getContext()); 3100 3101 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 3102 nullptr); 3103 3104 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 3105 report_fatal_error("GHC functions return void only"); 3106 3107 SDValue Glue; 3108 SmallVector<SDValue, 4> RetOps(1, Chain); 3109 3110 // Copy the result values into the output registers. 3111 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 3112 SDValue Val = OutVals[i]; 3113 CCValAssign &VA = RVLocs[i]; 3114 assert(VA.isRegLoc() && "Can only return in registers!"); 3115 3116 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 3117 // Handle returning f64 on RV32D with a soft float ABI. 
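      // The f64 is split into two i32 halves with RISCVISD::SplitF64; the low
      // half is returned in VA.getLocReg() and the high half in the next GPR,
      // mirroring how such values are passed as call arguments.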
3118 assert(VA.isRegLoc() && "Expected return via registers"); 3119 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 3120 DAG.getVTList(MVT::i32, MVT::i32), Val); 3121 SDValue Lo = SplitF64.getValue(0); 3122 SDValue Hi = SplitF64.getValue(1); 3123 Register RegLo = VA.getLocReg(); 3124 assert(RegLo < RISCV::X31 && "Invalid register pair"); 3125 Register RegHi = RegLo + 1; 3126 3127 if (STI.isRegisterReservedByUser(RegLo) || 3128 STI.isRegisterReservedByUser(RegHi)) 3129 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 3130 MF.getFunction(), 3131 "Return value register required, but has been reserved."}); 3132 3133 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 3134 Glue = Chain.getValue(1); 3135 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 3136 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 3137 Glue = Chain.getValue(1); 3138 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 3139 } else { 3140 // Handle a 'normal' return. 3141 Val = convertValVTToLocVT(DAG, Val, VA, DL); 3142 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 3143 3144 if (STI.isRegisterReservedByUser(VA.getLocReg())) 3145 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 3146 MF.getFunction(), 3147 "Return value register required, but has been reserved."}); 3148 3149 // Guarantee that all emitted copies are stuck together. 3150 Glue = Chain.getValue(1); 3151 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 3152 } 3153 } 3154 3155 RetOps[0] = Chain; // Update chain. 3156 3157 // Add the glue node if we have it. 3158 if (Glue.getNode()) { 3159 RetOps.push_back(Glue); 3160 } 3161 3162 // Interrupt service routines use different return instructions. 3163 const Function &Func = DAG.getMachineFunction().getFunction(); 3164 if (Func.hasFnAttribute("interrupt")) { 3165 if (!Func.getReturnType()->isVoidTy()) 3166 report_fatal_error( 3167 "Functions with the interrupt attribute must have void return type!"); 3168 3169 MachineFunction &MF = DAG.getMachineFunction(); 3170 StringRef Kind = 3171 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 3172 3173 unsigned RetOpc; 3174 if (Kind == "user") 3175 RetOpc = RISCVISD::URET_FLAG; 3176 else if (Kind == "supervisor") 3177 RetOpc = RISCVISD::SRET_FLAG; 3178 else 3179 RetOpc = RISCVISD::MRET_FLAG; 3180 3181 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 3182 } 3183 3184 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 3185 } 3186 3187 void RISCVTargetLowering::validateCCReservedRegs( 3188 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 3189 MachineFunction &MF) const { 3190 const Function &F = MF.getFunction(); 3191 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 3192 3193 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) { 3194 return STI.isRegisterReservedByUser(Reg.first); 3195 })) 3196 F.getContext().diagnose(DiagnosticInfoUnsupported{ 3197 F, "Argument register required, but has been reserved."}); 3198 } 3199 3200 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 3201 return CI->isTailCall(); 3202 } 3203 3204 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 3205 #define NODE_NAME_CASE(NODE) \ 3206 case RISCVISD::NODE: \ 3207 return "RISCVISD::" #NODE; 3208 // clang-format off 3209 switch ((RISCVISD::NodeType)Opcode) { 3210 case RISCVISD::FIRST_NUMBER: 3211 break; 3212 NODE_NAME_CASE(RET_FLAG) 3213 NODE_NAME_CASE(URET_FLAG) 3214 NODE_NAME_CASE(SRET_FLAG) 
3215 NODE_NAME_CASE(MRET_FLAG) 3216 NODE_NAME_CASE(CALL) 3217 NODE_NAME_CASE(SELECT_CC) 3218 NODE_NAME_CASE(BuildPairF64) 3219 NODE_NAME_CASE(SplitF64) 3220 NODE_NAME_CASE(TAIL) 3221 NODE_NAME_CASE(SLLW) 3222 NODE_NAME_CASE(SRAW) 3223 NODE_NAME_CASE(SRLW) 3224 NODE_NAME_CASE(DIVW) 3225 NODE_NAME_CASE(DIVUW) 3226 NODE_NAME_CASE(REMUW) 3227 NODE_NAME_CASE(ROLW) 3228 NODE_NAME_CASE(RORW) 3229 NODE_NAME_CASE(FSLW) 3230 NODE_NAME_CASE(FSRW) 3231 NODE_NAME_CASE(FMV_H_X) 3232 NODE_NAME_CASE(FMV_X_ANYEXTH) 3233 NODE_NAME_CASE(FMV_W_X_RV64) 3234 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 3235 NODE_NAME_CASE(READ_CYCLE_WIDE) 3236 NODE_NAME_CASE(GREVI) 3237 NODE_NAME_CASE(GREVIW) 3238 NODE_NAME_CASE(GORCI) 3239 NODE_NAME_CASE(GORCIW) 3240 } 3241 // clang-format on 3242 return nullptr; 3243 #undef NODE_NAME_CASE 3244 } 3245 3246 /// getConstraintType - Given a constraint letter, return the type of 3247 /// constraint it is for this target. 3248 RISCVTargetLowering::ConstraintType 3249 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 3250 if (Constraint.size() == 1) { 3251 switch (Constraint[0]) { 3252 default: 3253 break; 3254 case 'f': 3255 return C_RegisterClass; 3256 case 'I': 3257 case 'J': 3258 case 'K': 3259 return C_Immediate; 3260 case 'A': 3261 return C_Memory; 3262 } 3263 } 3264 return TargetLowering::getConstraintType(Constraint); 3265 } 3266 3267 std::pair<unsigned, const TargetRegisterClass *> 3268 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 3269 StringRef Constraint, 3270 MVT VT) const { 3271 // First, see if this is a constraint that directly corresponds to a 3272 // RISCV register class. 3273 if (Constraint.size() == 1) { 3274 switch (Constraint[0]) { 3275 case 'r': 3276 return std::make_pair(0U, &RISCV::GPRRegClass); 3277 case 'f': 3278 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) 3279 return std::make_pair(0U, &RISCV::FPR16RegClass); 3280 if (Subtarget.hasStdExtF() && VT == MVT::f32) 3281 return std::make_pair(0U, &RISCV::FPR32RegClass); 3282 if (Subtarget.hasStdExtD() && VT == MVT::f64) 3283 return std::make_pair(0U, &RISCV::FPR64RegClass); 3284 break; 3285 default: 3286 break; 3287 } 3288 } 3289 3290 // Clang will correctly decode the usage of register name aliases into their 3291 // official names. However, other frontends like `rustc` do not. This allows 3292 // users of these frontends to use the ABI names for registers in LLVM-style 3293 // register constraints. 
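  // For example, an operand constrained as "{a0}" is mapped to RISCV::X10 by
  // the table below, while the TableGen spelling "{x10}" falls through to the
  // generic TargetLowering handling at the end of this function.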
3294 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) 3295 .Case("{zero}", RISCV::X0) 3296 .Case("{ra}", RISCV::X1) 3297 .Case("{sp}", RISCV::X2) 3298 .Case("{gp}", RISCV::X3) 3299 .Case("{tp}", RISCV::X4) 3300 .Case("{t0}", RISCV::X5) 3301 .Case("{t1}", RISCV::X6) 3302 .Case("{t2}", RISCV::X7) 3303 .Cases("{s0}", "{fp}", RISCV::X8) 3304 .Case("{s1}", RISCV::X9) 3305 .Case("{a0}", RISCV::X10) 3306 .Case("{a1}", RISCV::X11) 3307 .Case("{a2}", RISCV::X12) 3308 .Case("{a3}", RISCV::X13) 3309 .Case("{a4}", RISCV::X14) 3310 .Case("{a5}", RISCV::X15) 3311 .Case("{a6}", RISCV::X16) 3312 .Case("{a7}", RISCV::X17) 3313 .Case("{s2}", RISCV::X18) 3314 .Case("{s3}", RISCV::X19) 3315 .Case("{s4}", RISCV::X20) 3316 .Case("{s5}", RISCV::X21) 3317 .Case("{s6}", RISCV::X22) 3318 .Case("{s7}", RISCV::X23) 3319 .Case("{s8}", RISCV::X24) 3320 .Case("{s9}", RISCV::X25) 3321 .Case("{s10}", RISCV::X26) 3322 .Case("{s11}", RISCV::X27) 3323 .Case("{t3}", RISCV::X28) 3324 .Case("{t4}", RISCV::X29) 3325 .Case("{t5}", RISCV::X30) 3326 .Case("{t6}", RISCV::X31) 3327 .Default(RISCV::NoRegister); 3328 if (XRegFromAlias != RISCV::NoRegister) 3329 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 3330 3331 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 3332 // TableGen record rather than the AsmName to choose registers for InlineAsm 3333 // constraints, plus we want to match those names to the widest floating point 3334 // register type available, manually select floating point registers here. 3335 // 3336 // The second case is the ABI name of the register, so that frontends can also 3337 // use the ABI names in register constraint lists. 3338 if (Subtarget.hasStdExtF()) { 3339 unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) 3340 .Cases("{f0}", "{ft0}", RISCV::F0_F) 3341 .Cases("{f1}", "{ft1}", RISCV::F1_F) 3342 .Cases("{f2}", "{ft2}", RISCV::F2_F) 3343 .Cases("{f3}", "{ft3}", RISCV::F3_F) 3344 .Cases("{f4}", "{ft4}", RISCV::F4_F) 3345 .Cases("{f5}", "{ft5}", RISCV::F5_F) 3346 .Cases("{f6}", "{ft6}", RISCV::F6_F) 3347 .Cases("{f7}", "{ft7}", RISCV::F7_F) 3348 .Cases("{f8}", "{fs0}", RISCV::F8_F) 3349 .Cases("{f9}", "{fs1}", RISCV::F9_F) 3350 .Cases("{f10}", "{fa0}", RISCV::F10_F) 3351 .Cases("{f11}", "{fa1}", RISCV::F11_F) 3352 .Cases("{f12}", "{fa2}", RISCV::F12_F) 3353 .Cases("{f13}", "{fa3}", RISCV::F13_F) 3354 .Cases("{f14}", "{fa4}", RISCV::F14_F) 3355 .Cases("{f15}", "{fa5}", RISCV::F15_F) 3356 .Cases("{f16}", "{fa6}", RISCV::F16_F) 3357 .Cases("{f17}", "{fa7}", RISCV::F17_F) 3358 .Cases("{f18}", "{fs2}", RISCV::F18_F) 3359 .Cases("{f19}", "{fs3}", RISCV::F19_F) 3360 .Cases("{f20}", "{fs4}", RISCV::F20_F) 3361 .Cases("{f21}", "{fs5}", RISCV::F21_F) 3362 .Cases("{f22}", "{fs6}", RISCV::F22_F) 3363 .Cases("{f23}", "{fs7}", RISCV::F23_F) 3364 .Cases("{f24}", "{fs8}", RISCV::F24_F) 3365 .Cases("{f25}", "{fs9}", RISCV::F25_F) 3366 .Cases("{f26}", "{fs10}", RISCV::F26_F) 3367 .Cases("{f27}", "{fs11}", RISCV::F27_F) 3368 .Cases("{f28}", "{ft8}", RISCV::F28_F) 3369 .Cases("{f29}", "{ft9}", RISCV::F29_F) 3370 .Cases("{f30}", "{ft10}", RISCV::F30_F) 3371 .Cases("{f31}", "{ft11}", RISCV::F31_F) 3372 .Default(RISCV::NoRegister); 3373 if (FReg != RISCV::NoRegister) { 3374 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); 3375 if (Subtarget.hasStdExtD()) { 3376 unsigned RegNo = FReg - RISCV::F0_F; 3377 unsigned DReg = RISCV::F0_D + RegNo; 3378 return std::make_pair(DReg, &RISCV::FPR64RegClass); 3379 } 3380 return std::make_pair(FReg, 
&RISCV::FPR32RegClass); 3381 } 3382 } 3383 3384 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 3385 } 3386 3387 unsigned 3388 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 3389 // Currently only support length 1 constraints. 3390 if (ConstraintCode.size() == 1) { 3391 switch (ConstraintCode[0]) { 3392 case 'A': 3393 return InlineAsm::Constraint_A; 3394 default: 3395 break; 3396 } 3397 } 3398 3399 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 3400 } 3401 3402 void RISCVTargetLowering::LowerAsmOperandForConstraint( 3403 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 3404 SelectionDAG &DAG) const { 3405 // Currently only support length 1 constraints. 3406 if (Constraint.length() == 1) { 3407 switch (Constraint[0]) { 3408 case 'I': 3409 // Validate & create a 12-bit signed immediate operand. 3410 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3411 uint64_t CVal = C->getSExtValue(); 3412 if (isInt<12>(CVal)) 3413 Ops.push_back( 3414 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 3415 } 3416 return; 3417 case 'J': 3418 // Validate & create an integer zero operand. 3419 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 3420 if (C->getZExtValue() == 0) 3421 Ops.push_back( 3422 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 3423 return; 3424 case 'K': 3425 // Validate & create a 5-bit unsigned immediate operand. 3426 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3427 uint64_t CVal = C->getZExtValue(); 3428 if (isUInt<5>(CVal)) 3429 Ops.push_back( 3430 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 3431 } 3432 return; 3433 default: 3434 break; 3435 } 3436 } 3437 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 3438 } 3439 3440 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 3441 Instruction *Inst, 3442 AtomicOrdering Ord) const { 3443 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 3444 return Builder.CreateFence(Ord); 3445 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 3446 return Builder.CreateFence(AtomicOrdering::Release); 3447 return nullptr; 3448 } 3449 3450 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 3451 Instruction *Inst, 3452 AtomicOrdering Ord) const { 3453 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 3454 return Builder.CreateFence(AtomicOrdering::Acquire); 3455 return nullptr; 3456 } 3457 3458 TargetLowering::AtomicExpansionKind 3459 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 3460 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 3461 // point operations can't be used in an lr/sc sequence without breaking the 3462 // forward-progress guarantee. 
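  // The A extension only guarantees eventual success for constrained LR/SC
  // loops, which may contain nothing beyond a limited set of base integer
  // instructions between the lr and sc; a floating-point operation inside the
  // loop falls outside that guarantee. Returning CmpXChg here makes
  // AtomicExpandPass rewrite, e.g. (illustrative IR only):
  //   %old = atomicrmw fadd float* %p, float 1.0 seq_cst
  // into a cmpxchg loop instead.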
3463 if (AI->isFloatingPointOperation()) 3464 return AtomicExpansionKind::CmpXChg; 3465 3466 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 3467 if (Size == 8 || Size == 16) 3468 return AtomicExpansionKind::MaskedIntrinsic; 3469 return AtomicExpansionKind::None; 3470 } 3471 3472 static Intrinsic::ID 3473 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 3474 if (XLen == 32) { 3475 switch (BinOp) { 3476 default: 3477 llvm_unreachable("Unexpected AtomicRMW BinOp"); 3478 case AtomicRMWInst::Xchg: 3479 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 3480 case AtomicRMWInst::Add: 3481 return Intrinsic::riscv_masked_atomicrmw_add_i32; 3482 case AtomicRMWInst::Sub: 3483 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 3484 case AtomicRMWInst::Nand: 3485 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 3486 case AtomicRMWInst::Max: 3487 return Intrinsic::riscv_masked_atomicrmw_max_i32; 3488 case AtomicRMWInst::Min: 3489 return Intrinsic::riscv_masked_atomicrmw_min_i32; 3490 case AtomicRMWInst::UMax: 3491 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 3492 case AtomicRMWInst::UMin: 3493 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 3494 } 3495 } 3496 3497 if (XLen == 64) { 3498 switch (BinOp) { 3499 default: 3500 llvm_unreachable("Unexpected AtomicRMW BinOp"); 3501 case AtomicRMWInst::Xchg: 3502 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 3503 case AtomicRMWInst::Add: 3504 return Intrinsic::riscv_masked_atomicrmw_add_i64; 3505 case AtomicRMWInst::Sub: 3506 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 3507 case AtomicRMWInst::Nand: 3508 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 3509 case AtomicRMWInst::Max: 3510 return Intrinsic::riscv_masked_atomicrmw_max_i64; 3511 case AtomicRMWInst::Min: 3512 return Intrinsic::riscv_masked_atomicrmw_min_i64; 3513 case AtomicRMWInst::UMax: 3514 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 3515 case AtomicRMWInst::UMin: 3516 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 3517 } 3518 } 3519 3520 llvm_unreachable("Unexpected XLen\n"); 3521 } 3522 3523 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 3524 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 3525 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 3526 unsigned XLen = Subtarget.getXLen(); 3527 Value *Ordering = 3528 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 3529 Type *Tys[] = {AlignedAddr->getType()}; 3530 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 3531 AI->getModule(), 3532 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 3533 3534 if (XLen == 64) { 3535 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 3536 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 3537 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 3538 } 3539 3540 Value *Result; 3541 3542 // Must pass the shift amount needed to sign extend the loaded value prior 3543 // to performing a signed comparison for min/max. ShiftAmt is the number of 3544 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 3545 // is the number of bits to left+right shift the value in order to 3546 // sign-extend. 
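  // For example, an i8 atomicrmw min on RV64 whose byte sits at bit offset 8
  // of its aligned word has ShiftAmt = 8 and ValWidth = 8, so the extra
  // operand is 64 - 8 - 8 = 48: a left shift followed by an arithmetic right
  // shift by 48 sign-extends the extracted field for the signed comparison.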
3547 if (AI->getOperation() == AtomicRMWInst::Min || 3548 AI->getOperation() == AtomicRMWInst::Max) { 3549 const DataLayout &DL = AI->getModule()->getDataLayout(); 3550 unsigned ValWidth = 3551 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 3552 Value *SextShamt = 3553 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 3554 Result = Builder.CreateCall(LrwOpScwLoop, 3555 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 3556 } else { 3557 Result = 3558 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 3559 } 3560 3561 if (XLen == 64) 3562 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 3563 return Result; 3564 } 3565 3566 TargetLowering::AtomicExpansionKind 3567 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 3568 AtomicCmpXchgInst *CI) const { 3569 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 3570 if (Size == 8 || Size == 16) 3571 return AtomicExpansionKind::MaskedIntrinsic; 3572 return AtomicExpansionKind::None; 3573 } 3574 3575 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 3576 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 3577 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 3578 unsigned XLen = Subtarget.getXLen(); 3579 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 3580 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 3581 if (XLen == 64) { 3582 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 3583 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 3584 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 3585 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 3586 } 3587 Type *Tys[] = {AlignedAddr->getType()}; 3588 Function *MaskedCmpXchg = 3589 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 3590 Value *Result = Builder.CreateCall( 3591 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 3592 if (XLen == 64) 3593 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 3594 return Result; 3595 } 3596 3597 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 3598 EVT VT) const { 3599 VT = VT.getScalarType(); 3600 3601 if (!VT.isSimple()) 3602 return false; 3603 3604 switch (VT.getSimpleVT().SimpleTy) { 3605 case MVT::f16: 3606 return Subtarget.hasStdExtZfh(); 3607 case MVT::f32: 3608 return Subtarget.hasStdExtF(); 3609 case MVT::f64: 3610 return Subtarget.hasStdExtD(); 3611 default: 3612 break; 3613 } 3614 3615 return false; 3616 } 3617 3618 Register RISCVTargetLowering::getExceptionPointerRegister( 3619 const Constant *PersonalityFn) const { 3620 return RISCV::X10; 3621 } 3622 3623 Register RISCVTargetLowering::getExceptionSelectorRegister( 3624 const Constant *PersonalityFn) const { 3625 return RISCV::X11; 3626 } 3627 3628 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const { 3629 // Return false to suppress the unnecessary extensions if the LibCall 3630 // arguments or return value is f32 type for LP64 ABI. 3631 RISCVABI::ABI ABI = Subtarget.getTargetABI(); 3632 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32)) 3633 return false; 3634 3635 return true; 3636 } 3637 3638 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, 3639 SDValue C) const { 3640 // Check integral scalar types. 3641 if (VT.isScalarInteger()) { 3642 // Do not perform the transformation on riscv32 with the M extension. 
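    // (The decomposition turns e.g. x * 9 into (x << 3) + x and x * 7 into
    // (x << 3) - x; the isPowerOf2_64 checks below accept exactly the
    // 2^n +/- 1 constants and their negations.)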
3643 if (!Subtarget.is64Bit() && Subtarget.hasStdExtM()) 3644 return false; 3645 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) { 3646 if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t)) 3647 return false; 3648 int64_t Imm = ConstNode->getSExtValue(); 3649 if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) || 3650 isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm)) 3651 return true; 3652 } 3653 } 3654 3655 return false; 3656 } 3657 3658 #define GET_REGISTER_MATCHER 3659 #include "RISCVGenAsmMatcher.inc" 3660 3661 Register 3662 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT, 3663 const MachineFunction &MF) const { 3664 Register Reg = MatchRegisterAltName(RegName); 3665 if (Reg == RISCV::NoRegister) 3666 Reg = MatchRegisterName(RegName); 3667 if (Reg == RISCV::NoRegister) 3668 report_fatal_error( 3669 Twine("Invalid register name \"" + StringRef(RegName) + "\".")); 3670 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF); 3671 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg)) 3672 report_fatal_error(Twine("Trying to obtain non-reserved register \"" + 3673 StringRef(RegName) + "\".")); 3674 return Reg; 3675 } 3676
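// Note: getRegisterByName above backs the llvm.read_register and
// llvm.write_register intrinsics; only registers that are reserved (always,
// like x2/sp, or by the user) may be accessed this way. Illustrative IR only:
//   %sp = call i64 @llvm.read_register.i64(metadata !0)  ; !0 = !{!"sp"}
// succeeds on RV64 because "sp" matches via its ABI alt-name and x2 is always
// reserved, whereas naming a non-reserved GPR hits the fatal error above.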