//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  if (Subtarget.hasStdExtV()) {
    addRegisterClass(RISCVVMVTs::vbool64_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool32_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool16_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vbool1_t, &RISCV::VRRegClass);

    addRegisterClass(RISCVVMVTs::vint8mf8_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint8m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint8m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint8m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint16mf4_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint16m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint16m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint16m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint32mf2_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint32m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint32m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint32m8_t, &RISCV::VRM8RegClass);

    addRegisterClass(RISCVVMVTs::vint64m1_t, &RISCV::VRRegClass);
    addRegisterClass(RISCVVMVTs::vint64m2_t, &RISCV::VRM2RegClass);
    addRegisterClass(RISCVVMVTs::vint64m4_t, &RISCV::VRM4RegClass);
    addRegisterClass(RISCVVMVTs::vint64m8_t, &RISCV::VRM8RegClass);

    if (Subtarget.hasStdExtZfh()) {
      addRegisterClass(RISCVVMVTs::vfloat16mf4_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16mf2_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat16m8_t, &RISCV::VRM8RegClass);
    }

    if (Subtarget.hasStdExtF()) {
      addRegisterClass(RISCVVMVTs::vfloat32mf2_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat32m8_t, &RISCV::VRM8RegClass);
    }

    if (Subtarget.hasStdExtD()) {
      addRegisterClass(RISCVVMVTs::vfloat64m1_t, &RISCV::VRRegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m2_t, &RISCV::VRM2RegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m4_t, &RISCV::VRM4RegClass);
      addRegisterClass(RISCVVMVTs::vfloat64m8_t, &RISCV::VRM8RegClass);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::BSWAP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Legal);
    setOperationAction(ISD::FSHR, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    for (auto VT : MVT::integer_scalable_vector_valuetypes()) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);
    }

    // We must custom-lower SPLAT_VECTOR vXi64 on RV32
    if (!Subtarget.is64Bit())
      setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
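    // As an illustrative example: (setcc x, y, SETOGT) is expanded here to
    // (setcc y, x, SETOLT), which RVV supports directly, and isel patterns may
    // swap the operands once more where the GT form is the better fit.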
    ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    if (Subtarget.hasStdExtZfh()) {
      for (auto VT : {RISCVVMVTs::vfloat16mf4_t, RISCVVMVTs::vfloat16mf2_t,
                      RISCVVMVTs::vfloat16m1_t, RISCVVMVTs::vfloat16m2_t,
                      RISCVVMVTs::vfloat16m4_t, RISCVVMVTs::vfloat16m8_t}) {
        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);
      }
    }

    if (Subtarget.hasStdExtF()) {
      for (auto VT : {RISCVVMVTs::vfloat32mf2_t, RISCVVMVTs::vfloat32m1_t,
                      RISCVVMVTs::vfloat32m2_t, RISCVVMVTs::vfloat32m4_t,
                      RISCVVMVTs::vfloat32m8_t}) {
        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);
      }
    }

    if (Subtarget.hasStdExtD()) {
      for (auto VT : {RISCVVMVTs::vfloat64m1_t, RISCVVMVTs::vfloat64m2_t,
                      RISCVVMVTs::vfloat64m4_t, RISCVVMVTs::vfloat64m8_t}) {
        setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);
      }
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  // We can use any register for comparisons
  setHasMultipleConditionRegisters();

  if (Subtarget.hasStdExtZbp()) {
    setTargetDAGCombine(ISD::OR);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV())
    return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
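// For example, a DAG node (setcc a, b, SETGT) is first normalised by
// normaliseSetCC to (setcc b, a, SETLT), and SETLT then maps to BLT below.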
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
            Subtarget.hasStdExtZfh()) &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::i16)
        return SDValue();
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::i32)
        return SDValue();
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
    return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
  }
  case ISD::SPLAT_VECTOR:
    return lowerSPLATVECTOR(Op, DAG);
  case ISD::VSCALE: {
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64 bit known
    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
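    // For example, on a machine with VLEN=128 the vlenb CSR reads 16, so
    // vscale = 16 / 8 = 2 and <vscale x 2 x i32> holds four i32 elements.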
    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
  }
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2GiB range within
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
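    // Roughly: auipc a0, %pcrel_hi(sym)
    //          addi  a0, a0, %pcrel_lo(<label of the auipc>)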
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
                                            SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
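  // For example, a reference to tls_sym+8 becomes (add Addr, 8) here rather
  // than having the +8 folded into the relocations up front.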
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
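  // In effect, va_start(ap, ...) reduces to a single pointer store: ap is set
  // to the address of the first vararg save slot, with no further structure.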
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

// Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is
// illegal (currently only vXi64 RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to SPLAT_VECTOR_I64
SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VecVT = Op.getValueType();
  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
         "Unexpected SPLAT_VECTOR lowering");
  SDValue SplatVal = Op.getOperand(0);

  // If we can prove that the value is a sign-extended 32-bit value, lower this
  // as a custom node in order to try and match RVV vector/scalar instructions.
  if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) {
    if (isInt<32>(CVal->getSExtValue()))
      return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT,
                         DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32));
  }

  // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
  // to accidentally sign-extend the 32-bit halves to the e64 SEW:
  // vmv.v.x vX, hi
  // vsll.vx vX, vX, /*32*/
  // vmv.v.x vY, lo
  // vsll.vx vY, vY, /*32*/
  // vsrl.vx vY, vY, /*32*/
  // vor.vv vX, vX, vY
  SDValue One = DAG.getConstant(1, DL, MVT::i32);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One);

  Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
  Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
  Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);

  if (isNullConstant(Hi))
    return Lo;

  Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
  Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);

  return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);

  if (Subtarget.hasStdExtV()) {
    // Some RVV intrinsics may claim that they want an integer operand to be
    // extended.
    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
      if (II->ExtendedOperand) {
        assert(II->ExtendedOperand < Op.getNumOperands());
        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
        SDValue &ScalarOp = Operands[II->ExtendedOperand];
        EVT OpVT = ScalarOp.getValueType();
        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
          // If the operand is a constant, sign extend to increase our chances
          // of being able to use a .vi instruction. ANY_EXTEND would become a
          // zero extend and the simm5 check in isel would fail.
          // FIXME: Should we ignore the upper bits in isel instead?
          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
                                                          : ISD::ANY_EXTEND;
          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
          return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                             Operands);
        }
      }
    }
  }

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  case Intrinsic::riscv_vmv_x_s:
    assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!");
    return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
                       Op.getOperand(1));
  }
}

SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                    SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);

  if (Subtarget.hasStdExtV()) {
    // Some RVV intrinsics may claim that they want an integer operand to be
    // extended.
    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) {
      if (II->ExtendedOperand) {
        // The operands start from the second argument in INTRINSIC_W_CHAIN.
        unsigned ExtendOp = II->ExtendedOperand + 1;
        assert(ExtendOp < Op.getNumOperands());
        SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
        SDValue &ScalarOp = Operands[ExtendOp];
        EVT OpVT = ScalarOp.getValueType();
        if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
            (OpVT == MVT::i32 && Subtarget.is64Bit())) {
          // If the operand is a constant, sign extend to increase our chances
          // of being able to use a .vi instruction. ANY_EXTEND would become a
          // zero extend and the simm5 check in isel would fail.
          // FIXME: Should we ignore the upper bits in isel instead?
          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
                                                          : ISD::ANY_EXTEND;
          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
                             Operands);
        }
      }
    }
  }

  return SDValue();
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  case RISCVISD::GREVI:
    return RISCVISD::GREVIW;
  case RISCVISD::GORCI:
    return RISCVISD::GORCIW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// later on because the fact that the operation was originally of type i32 is
// lost.
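// For example, on RV64 an i32 (shl x, y) is rewritten by the helper below to
// (trunc (riscvisd::sllw (anyext x), (anyext y))), from which SLLW can be
// selected directly.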
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics so that additional sign-extension instructions can be avoided.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization we'd end up with 'di'. If
    // the FP type doesn't need to be softened just let generic type
    // legalization promote the result type.
    if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
        TargetLowering::TypeSoftenFloat)
      return;
    RTLIB::Libcall LC;
    if (N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             Subtarget.hasStdExtF()) ||
            (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) &&
           "Unexpected custom legalisation");
    SDValue Op0 = N->getOperand(0);
    if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::f16)
        return;
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
    } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::f32)
        return;
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    }
    break;
  }
  case RISCVISD::GREVI:
  case RISCVISD::GORCI: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    // This is similar to customLegalizeToWOp, except that we pass the second
    // operand (a TargetConstant) straight through: it is already of type
    // XLenVT.
    SDLoc DL(N);
    RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewRes =
        DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1));
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
1426 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 1427 break; 1428 } 1429 case ISD::BSWAP: 1430 case ISD::BITREVERSE: { 1431 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 1432 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 1433 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 1434 N->getOperand(0)); 1435 unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24; 1436 SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0, 1437 DAG.getTargetConstant(Imm, DL, 1438 Subtarget.getXLenVT())); 1439 // ReplaceNodeResults requires we maintain the same type for the return 1440 // value. 1441 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW)); 1442 break; 1443 } 1444 case ISD::FSHL: 1445 case ISD::FSHR: { 1446 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 1447 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 1448 SDValue NewOp0 = 1449 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 1450 SDValue NewOp1 = 1451 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 1452 SDValue NewOp2 = 1453 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 1454 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 1455 // Mask the shift amount to 5 bits. 1456 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 1457 DAG.getConstant(0x1f, DL, MVT::i64)); 1458 unsigned Opc = 1459 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; 1460 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); 1461 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 1462 break; 1463 } 1464 case ISD::INTRINSIC_WO_CHAIN: { 1465 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 1466 switch (IntNo) { 1467 default: 1468 llvm_unreachable( 1469 "Don't know how to custom type legalize this intrinsic!"); 1470 case Intrinsic::riscv_vmv_x_s: { 1471 EVT VT = N->getValueType(0); 1472 assert((VT == MVT::i8 || VT == MVT::i16 || 1473 (Subtarget.is64Bit() && VT == MVT::i32)) && 1474 "Unexpected custom legalisation!"); 1475 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, 1476 Subtarget.getXLenVT(), N->getOperand(1)); 1477 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); 1478 break; 1479 } 1480 } 1481 break; 1482 } 1483 } 1484 } 1485 1486 // A structure to hold one of the bit-manipulation patterns below. 
Together, a
1487 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
1488 // (or (and (shl x, 1), 0xAAAAAAAA),
1489 // (and (srl x, 1), 0x55555555))
1490 struct RISCVBitmanipPat {
1491 SDValue Op;
1492 unsigned ShAmt;
1493 bool IsSHL;
1494
1495 bool formsPairWith(const RISCVBitmanipPat &Other) const {
1496 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
1497 }
1498 };
1499
1500 // Matches any of the following bit-manipulation patterns:
1501 // (and (shl x, 1), (0x55555555 << 1))
1502 // (and (srl x, 1), 0x55555555)
1503 // (shl (and x, 0x55555555), 1)
1504 // (srl (and x, (0x55555555 << 1)), 1)
1505 // where the shift amount and mask may vary thus:
1506 // [1] = 0x55555555 / 0xAAAAAAAA
1507 // [2] = 0x33333333 / 0xCCCCCCCC
1508 // [4] = 0x0F0F0F0F / 0xF0F0F0F0
1509 // [8] = 0x00FF00FF / 0xFF00FF00
1510 // [16] = 0x0000FFFF / 0xFFFF0000
1511 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
1512 static Optional<RISCVBitmanipPat> matchRISCVBitmanipPat(SDValue Op) {
1513 Optional<uint64_t> Mask;
1514 // Optionally consume a mask around the shift operation.
1515 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
1516 Mask = Op.getConstantOperandVal(1);
1517 Op = Op.getOperand(0);
1518 }
1519 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
1520 return None;
1521 bool IsSHL = Op.getOpcode() == ISD::SHL;
1522
1523 if (!isa<ConstantSDNode>(Op.getOperand(1)))
1524 return None;
1525 auto ShAmt = Op.getConstantOperandVal(1);
1526
1527 if (!isPowerOf2_64(ShAmt))
1528 return None;
1529
1530 // These are the unshifted masks which we use to match bit-manipulation
1531 // patterns. They may be shifted left in certain circumstances.
1532 static const uint64_t BitmanipMasks[] = {
1533 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
1534 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL,
1535 };
1536
1537 unsigned MaskIdx = Log2_64(ShAmt);
1538 if (MaskIdx >= array_lengthof(BitmanipMasks))
1539 return None;
1540
1541 auto Src = Op.getOperand(0);
1542
1543 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
1544 auto ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
1545
1546 // The expected mask is shifted left when the AND is found around SHL
1547 // patterns.
1548 // ((x >> 1) & 0x55555555)
1549 // ((x << 1) & 0xAAAAAAAA)
1550 bool SHLExpMask = IsSHL;
1551
1552 if (!Mask) {
1553 // Sometimes LLVM keeps the mask as an operand of the shift, typically when
1554 // the mask is all ones: consume that now.
1555 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
1556 Mask = Src.getConstantOperandVal(1);
1557 Src = Src.getOperand(0);
1558 // The expected mask is now in fact shifted left for SRL, so reverse the
1559 // decision.
1560 // ((x & 0xAAAAAAAA) >> 1)
1561 // ((x & 0x55555555) << 1)
1562 SHLExpMask = !SHLExpMask;
1563 } else {
1564 // Use a default shifted mask of all-ones if there's no AND, truncated
1565 // down to the expected width. This simplifies the logic later on.
1566 Mask = maskTrailingOnes<uint64_t>(Width);
1567 *Mask &= (IsSHL ?
*Mask << ShAmt : *Mask >> ShAmt);
1568 }
1569 }
1570
1571 if (SHLExpMask)
1572 ExpMask <<= ShAmt;
1573
1574 if (Mask != ExpMask)
1575 return None;
1576
1577 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
1578 }
1579
1580 // Match the following pattern as a GREVI(W) operation
1581 // (or (BITMANIP_SHL x), (BITMANIP_SRL x))
1582 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
1583 const RISCVSubtarget &Subtarget) {
1584 EVT VT = Op.getValueType();
1585
1586 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1587 auto LHS = matchRISCVBitmanipPat(Op.getOperand(0));
1588 auto RHS = matchRISCVBitmanipPat(Op.getOperand(1));
1589 if (LHS && RHS && LHS->formsPairWith(*RHS)) {
1590 SDLoc DL(Op);
1591 return DAG.getNode(
1592 RISCVISD::GREVI, DL, VT, LHS->Op,
1593 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1594 }
1595 }
1596 return SDValue();
1597 }
1598
1599 // Matches any of the following patterns as a GORCI(W) operation
1600 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2
1601 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2
1602 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
1603 // Note that with the variant of 3.,
1604 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
1605 // the inner pattern will first be matched as GREVI and then the outer
1606 // pattern will be matched to GORC via the first rule above.
1607 // 4. (or (rotl/rotr x, bitwidth/2), x)
1608 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
1609 const RISCVSubtarget &Subtarget) {
1610 EVT VT = Op.getValueType();
1611
1612 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
1613 SDLoc DL(Op);
1614 SDValue Op0 = Op.getOperand(0);
1615 SDValue Op1 = Op.getOperand(1);
1616
1617 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
1618 if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
1619 isPowerOf2_32(Reverse.getConstantOperandVal(1)))
1620 return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
1621 // We can also form GORCI from ROTL/ROTR by half the bitwidth.
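// (A rotate by exactly half the bit width pairs bit i with bit i ^ (w/2),
// so (or (rotl/rotr x, w/2), x) computes x[i] | x[i ^ (w/2)] for every bit;
// that is precisely the GORCI stage with shamt = w/2.)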
1622 if ((Reverse.getOpcode() == ISD::ROTL ||
1623 Reverse.getOpcode() == ISD::ROTR) &&
1624 Reverse.getOperand(0) == X &&
1625 isa<ConstantSDNode>(Reverse.getOperand(1))) {
1626 uint64_t RotAmt = Reverse.getConstantOperandVal(1);
1627 if (RotAmt == (VT.getSizeInBits() / 2))
1628 return DAG.getNode(
1629 RISCVISD::GORCI, DL, VT, X,
1630 DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
1631 }
1632 return SDValue();
1633 };
1634
1635 // Check for either commutable permutation of (or (GREVI x, shamt), x)
1636 if (SDValue V = MatchOROfReverse(Op0, Op1))
1637 return V;
1638 if (SDValue V = MatchOROfReverse(Op1, Op0))
1639 return V;
1640
1641 // OR is commutable so canonicalize its OR operand to the left
1642 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
1643 std::swap(Op0, Op1);
1644 if (Op0.getOpcode() != ISD::OR)
1645 return SDValue();
1646 SDValue OrOp0 = Op0.getOperand(0);
1647 SDValue OrOp1 = Op0.getOperand(1);
1648 auto LHS = matchRISCVBitmanipPat(OrOp0);
1649 // OR is commutable so swap the operands and try again: x might have been
1650 // on the left
1651 if (!LHS) {
1652 std::swap(OrOp0, OrOp1);
1653 LHS = matchRISCVBitmanipPat(OrOp0);
1654 }
1655 auto RHS = matchRISCVBitmanipPat(Op1);
1656 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
1657 return DAG.getNode(
1658 RISCVISD::GORCI, DL, VT, LHS->Op,
1659 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
1660 }
1661 }
1662 return SDValue();
1663 }
1664
1665 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
1666 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
1667 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stages
1668 // do not undo themselves, but they are redundant.
1669 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
1670 unsigned ShAmt1 = N->getConstantOperandVal(1);
1671 SDValue Src = N->getOperand(0);
1672
1673 if (Src.getOpcode() != N->getOpcode())
1674 return SDValue();
1675
1676 unsigned ShAmt2 = Src.getConstantOperandVal(1);
1677 Src = Src.getOperand(0);
1678
1679 unsigned CombinedShAmt;
1680 if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW)
1681 CombinedShAmt = ShAmt1 | ShAmt2;
1682 else
1683 CombinedShAmt = ShAmt1 ^ ShAmt2;
1684
1685 if (CombinedShAmt == 0)
1686 return Src;
1687
1688 SDLoc DL(N);
1689 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src,
1690 DAG.getTargetConstant(CombinedShAmt, DL,
1691 N->getOperand(1).getValueType()));
1692 }
1693
1694 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
1695 DAGCombinerInfo &DCI) const {
1696 SelectionDAG &DAG = DCI.DAG;
1697
1698 switch (N->getOpcode()) {
1699 default:
1700 break;
1701 case RISCVISD::SplitF64: {
1702 SDValue Op0 = N->getOperand(0);
1703 // If the input to SplitF64 is just BuildPairF64 then the operation is
1704 // redundant. Instead, use BuildPairF64's operands directly.
1705 if (Op0->getOpcode() == RISCVISD::BuildPairF64)
1706 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
1707
1708 SDLoc DL(N);
1709
1710 // It's cheaper to materialise two 32-bit integers than to load a double
1711 // from the constant pool and transfer it to integer registers through the
1712 // stack.
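// For example, the double +1.0 has the bit pattern 0x3FF0000000000000 and
// splits into Lo = 0 and Hi = 0x3FF00000, the latter a single LUI.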
1713 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
1714 APInt V = C->getValueAPF().bitcastToAPInt();
1715 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
1716 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
1717 return DCI.CombineTo(N, Lo, Hi);
1718 }
1719
1720 // This is a target-specific version of a DAGCombine performed in
1721 // DAGCombiner::visitBITCAST. It performs the equivalent of:
1722 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
1723 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
1724 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
1725 !Op0.getNode()->hasOneUse())
1726 break;
1727 SDValue NewSplitF64 =
1728 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
1729 Op0.getOperand(0));
1730 SDValue Lo = NewSplitF64.getValue(0);
1731 SDValue Hi = NewSplitF64.getValue(1);
1732 APInt SignBit = APInt::getSignMask(32);
1733 if (Op0.getOpcode() == ISD::FNEG) {
1734 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
1735 DAG.getConstant(SignBit, DL, MVT::i32));
1736 return DCI.CombineTo(N, Lo, NewHi);
1737 }
1738 assert(Op0.getOpcode() == ISD::FABS);
1739 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
1740 DAG.getConstant(~SignBit, DL, MVT::i32));
1741 return DCI.CombineTo(N, Lo, NewHi);
1742 }
1743 case RISCVISD::SLLW:
1744 case RISCVISD::SRAW:
1745 case RISCVISD::SRLW:
1746 case RISCVISD::ROLW:
1747 case RISCVISD::RORW: {
1748 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
1749 SDValue LHS = N->getOperand(0);
1750 SDValue RHS = N->getOperand(1);
1751 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
1752 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
1753 if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
1754 SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
1755 if (N->getOpcode() != ISD::DELETED_NODE)
1756 DCI.AddToWorklist(N);
1757 return SDValue(N, 0);
1758 }
1759 break;
1760 }
1761 case RISCVISD::FSLW:
1762 case RISCVISD::FSRW: {
1763 // Only the lower 32 bits of the values and the lower 6 bits of the shift
1764 // amount are read.
1765 SDValue Op0 = N->getOperand(0);
1766 SDValue Op1 = N->getOperand(1);
1767 SDValue ShAmt = N->getOperand(2);
1768 APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1769 APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
1770 if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
1771 SimplifyDemandedBits(Op1, OpMask, DCI) ||
1772 SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
1773 if (N->getOpcode() != ISD::DELETED_NODE)
1774 DCI.AddToWorklist(N);
1775 return SDValue(N, 0);
1776 }
1777 break;
1778 }
1779 case RISCVISD::GREVIW:
1780 case RISCVISD::GORCIW: {
1781 // Only the lower 32 bits of the first operand are read
1782 SDValue Op0 = N->getOperand(0);
1783 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
1784 if (SimplifyDemandedBits(Op0, Mask, DCI)) {
1785 if (N->getOpcode() != ISD::DELETED_NODE)
1786 DCI.AddToWorklist(N);
1787 return SDValue(N, 0);
1788 }
1789
1790 return combineGREVI_GORCI(N, DCI.DAG);
1791 }
1792 case RISCVISD::FMV_X_ANYEXTW_RV64: {
1793 SDLoc DL(N);
1794 SDValue Op0 = N->getOperand(0);
1795 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
1796 // conversion is unnecessary and can be replaced with an ANY_EXTEND
1797 // of the FMV_W_X_RV64 operand.
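// (The FMV_W_X_RV64 operand is already i64, as asserted below, so it can
// simply be returned; no explicit ANY_EXTEND needs to be created.)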
1798 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { 1799 assert(Op0.getOperand(0).getValueType() == MVT::i64 && 1800 "Unexpected value type!"); 1801 return Op0.getOperand(0); 1802 } 1803 1804 // This is a target-specific version of a DAGCombine performed in 1805 // DAGCombiner::visitBITCAST. It performs the equivalent of: 1806 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 1807 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 1808 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 1809 !Op0.getNode()->hasOneUse()) 1810 break; 1811 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 1812 Op0.getOperand(0)); 1813 APInt SignBit = APInt::getSignMask(32).sext(64); 1814 if (Op0.getOpcode() == ISD::FNEG) 1815 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 1816 DAG.getConstant(SignBit, DL, MVT::i64)); 1817 1818 assert(Op0.getOpcode() == ISD::FABS); 1819 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 1820 DAG.getConstant(~SignBit, DL, MVT::i64)); 1821 } 1822 case RISCVISD::GREVI: 1823 case RISCVISD::GORCI: 1824 return combineGREVI_GORCI(N, DCI.DAG); 1825 case ISD::OR: 1826 if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) 1827 return GREV; 1828 if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) 1829 return GORC; 1830 break; 1831 case RISCVISD::SELECT_CC: { 1832 // Transform 1833 // (select_cc (xor X, 1), 0, setne, trueV, falseV) -> 1834 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 1835 // This can occur when legalizing some floating point comparisons. 1836 SDValue LHS = N->getOperand(0); 1837 SDValue RHS = N->getOperand(1); 1838 auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); 1839 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 1840 if ((CCVal == ISD::SETNE || CCVal == ISD::SETEQ) && isNullConstant(RHS) && 1841 LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) && 1842 DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) { 1843 SDLoc DL(N); 1844 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 1845 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 1846 return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0), 1847 {LHS.getOperand(0), RHS, TargetCC, N->getOperand(3), 1848 N->getOperand(4)}); 1849 } 1850 break; 1851 } 1852 } 1853 1854 return SDValue(); 1855 } 1856 1857 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 1858 const SDNode *N, CombineLevel Level) const { 1859 // The following folds are only desirable if `(OP _, c1 << c2)` can be 1860 // materialised in fewer instructions than `(OP _, c1)`: 1861 // 1862 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 1863 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 1864 SDValue N0 = N->getOperand(0); 1865 EVT Ty = N0.getValueType(); 1866 if (Ty.isScalarInteger() && 1867 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 1868 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 1869 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 1870 if (C1 && C2) { 1871 APInt C1Int = C1->getAPIntValue(); 1872 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 1873 1874 // We can materialise `c1 << c2` into an add immediate, so it's "free", 1875 // and the combine should happen, to potentially allow further combines 1876 // later. 
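// For example, in (shl (add x, 1), 4) the shifted constant 1 << 4 = 16
// still fits in a 12-bit ADDI immediate, so rewriting to
// (add (shl x, 4), 16) costs nothing.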
1877 if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1878 isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1879 return true;
1880
1881 // We can materialise `c1` in an add immediate, so it's "free", and the
1882 // combine should be prevented.
1883 if (C1Int.getMinSignedBits() <= 64 &&
1884 isLegalAddImmediate(C1Int.getSExtValue()))
1885 return false;
1886
1887 // Neither constant will fit into an immediate, so find materialisation
1888 // costs.
1889 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1890 Subtarget.is64Bit());
1891 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1892 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1893
1894 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1895 // combine should be prevented.
1896 if (C1Cost < ShiftedC1Cost)
1897 return false;
1898 }
1899 }
1900 return true;
1901 }
1902
1903 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
1904 KnownBits &Known,
1905 const APInt &DemandedElts,
1906 const SelectionDAG &DAG,
1907 unsigned Depth) const {
1908 unsigned Opc = Op.getOpcode();
1909 assert((Opc >= ISD::BUILTIN_OP_END ||
1910 Opc == ISD::INTRINSIC_WO_CHAIN ||
1911 Opc == ISD::INTRINSIC_W_CHAIN ||
1912 Opc == ISD::INTRINSIC_VOID) &&
1913 "Should use MaskedValueIsZero if you don't know whether Op"
1914 " is a target node!");
1915
1916 Known.resetAll();
1917 switch (Opc) {
1918 default: break;
1919 case RISCVISD::READ_VLENB:
1920 // We assume VLENB is at least 8 bytes.
1921 // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits.
1922 Known.Zero.setLowBits(3);
1923 break;
1924 }
1925 }
1926
1927 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1928 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1929 unsigned Depth) const {
1930 switch (Op.getOpcode()) {
1931 default:
1932 break;
1933 case RISCVISD::SLLW:
1934 case RISCVISD::SRAW:
1935 case RISCVISD::SRLW:
1936 case RISCVISD::DIVW:
1937 case RISCVISD::DIVUW:
1938 case RISCVISD::REMUW:
1939 case RISCVISD::ROLW:
1940 case RISCVISD::RORW:
1941 case RISCVISD::GREVIW:
1942 case RISCVISD::GORCIW:
1943 case RISCVISD::FSLW:
1944 case RISCVISD::FSRW:
1945 // TODO: As the result is sign-extended, this is conservatively correct. A
1946 // more precise answer could be calculated for SRAW depending on known
1947 // bits in the shift amount.
1948 return 33;
1949 case RISCVISD::VMV_X_S:
1950 // The number of sign bits of the scalar result is computed by obtaining the
1951 // element type of the input vector operand, subtracting its width from the
1952 // XLEN, and then adding one (sign bit within the element type).
1953 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
1954 }
1955
1956 return 1;
1957 }
1958
1959 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1960 MachineBasicBlock *BB) {
1961 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1962
1963 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1964 // Should the count have wrapped while it was being read, we need to try
1965 // again.
1966 // ...
1967 // read:
1968 // rdcycleh x3 # load high word of cycle
1969 // rdcycle x2 # load low word of cycle
1970 // rdcycleh x4 # load high word of cycle
1971 // bne x3, x4, read # check if high word reads match, otherwise try again
1972 // ...
1973 1974 MachineFunction &MF = *BB->getParent(); 1975 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 1976 MachineFunction::iterator It = ++BB->getIterator(); 1977 1978 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 1979 MF.insert(It, LoopMBB); 1980 1981 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 1982 MF.insert(It, DoneMBB); 1983 1984 // Transfer the remainder of BB and its successor edges to DoneMBB. 1985 DoneMBB->splice(DoneMBB->begin(), BB, 1986 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 1987 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 1988 1989 BB->addSuccessor(LoopMBB); 1990 1991 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 1992 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 1993 Register LoReg = MI.getOperand(0).getReg(); 1994 Register HiReg = MI.getOperand(1).getReg(); 1995 DebugLoc DL = MI.getDebugLoc(); 1996 1997 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 1998 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 1999 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 2000 .addReg(RISCV::X0); 2001 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 2002 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 2003 .addReg(RISCV::X0); 2004 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 2005 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 2006 .addReg(RISCV::X0); 2007 2008 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 2009 .addReg(HiReg) 2010 .addReg(ReadAgainReg) 2011 .addMBB(LoopMBB); 2012 2013 LoopMBB->addSuccessor(LoopMBB); 2014 LoopMBB->addSuccessor(DoneMBB); 2015 2016 MI.eraseFromParent(); 2017 2018 return DoneMBB; 2019 } 2020 2021 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 2022 MachineBasicBlock *BB) { 2023 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 2024 2025 MachineFunction &MF = *BB->getParent(); 2026 DebugLoc DL = MI.getDebugLoc(); 2027 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2028 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 2029 Register LoReg = MI.getOperand(0).getReg(); 2030 Register HiReg = MI.getOperand(1).getReg(); 2031 Register SrcReg = MI.getOperand(2).getReg(); 2032 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 2033 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 2034 2035 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 2036 RI); 2037 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 2038 MachineMemOperand *MMOLo = 2039 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 2040 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 2041 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 2042 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 2043 .addFrameIndex(FI) 2044 .addImm(0) 2045 .addMemOperand(MMOLo); 2046 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 2047 .addFrameIndex(FI) 2048 .addImm(4) 2049 .addMemOperand(MMOHi); 2050 MI.eraseFromParent(); // The pseudo instruction is gone now. 
2051 return BB; 2052 } 2053 2054 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 2055 MachineBasicBlock *BB) { 2056 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 2057 "Unexpected instruction"); 2058 2059 MachineFunction &MF = *BB->getParent(); 2060 DebugLoc DL = MI.getDebugLoc(); 2061 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 2062 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 2063 Register DstReg = MI.getOperand(0).getReg(); 2064 Register LoReg = MI.getOperand(1).getReg(); 2065 Register HiReg = MI.getOperand(2).getReg(); 2066 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 2067 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 2068 2069 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 2070 MachineMemOperand *MMOLo = 2071 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 2072 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 2073 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 2074 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 2075 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 2076 .addFrameIndex(FI) 2077 .addImm(0) 2078 .addMemOperand(MMOLo); 2079 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 2080 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 2081 .addFrameIndex(FI) 2082 .addImm(4) 2083 .addMemOperand(MMOHi); 2084 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 2085 MI.eraseFromParent(); // The pseudo instruction is gone now. 2086 return BB; 2087 } 2088 2089 static bool isSelectPseudo(MachineInstr &MI) { 2090 switch (MI.getOpcode()) { 2091 default: 2092 return false; 2093 case RISCV::Select_GPR_Using_CC_GPR: 2094 case RISCV::Select_FPR16_Using_CC_GPR: 2095 case RISCV::Select_FPR32_Using_CC_GPR: 2096 case RISCV::Select_FPR64_Using_CC_GPR: 2097 return true; 2098 } 2099 } 2100 2101 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 2102 MachineBasicBlock *BB) { 2103 // To "insert" Select_* instructions, we actually have to insert the triangle 2104 // control-flow pattern. The incoming instructions know the destination vreg 2105 // to set, the condition code register to branch on, the true/false values to 2106 // select between, and the condcode to use to select the appropriate branch. 2107 // 2108 // We produce the following control flow: 2109 // HeadMBB 2110 // | \ 2111 // | IfFalseMBB 2112 // | / 2113 // TailMBB 2114 // 2115 // When we find a sequence of selects we attempt to optimize their emission 2116 // by sharing the control flow. Currently we only handle cases where we have 2117 // multiple selects with the exact same condition (same LHS, RHS and CC). 2118 // The selects may be interleaved with other instructions if the other 2119 // instructions meet some requirements we deem safe: 2120 // - They are debug instructions. Otherwise, 2121 // - They do not have side-effects, do not access memory and their inputs do 2122 // not depend on the results of the select pseudo-instructions. 2123 // The TrueV/FalseV operands of the selects cannot depend on the result of 2124 // previous selects in the sequence. 2125 // These conditions could be further relaxed. See the X86 target for a 2126 // related approach and more information. 
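// Each Select_* pseudo has the operand order (dst, lhs, rhs, cc, trueval,
// falseval); the loop below scans forward from MI, extending the sequence
// while the (lhs, rhs, cc) triple matches and the conditions above hold.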
2127 Register LHS = MI.getOperand(1).getReg(); 2128 Register RHS = MI.getOperand(2).getReg(); 2129 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 2130 2131 SmallVector<MachineInstr *, 4> SelectDebugValues; 2132 SmallSet<Register, 4> SelectDests; 2133 SelectDests.insert(MI.getOperand(0).getReg()); 2134 2135 MachineInstr *LastSelectPseudo = &MI; 2136 2137 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 2138 SequenceMBBI != E; ++SequenceMBBI) { 2139 if (SequenceMBBI->isDebugInstr()) 2140 continue; 2141 else if (isSelectPseudo(*SequenceMBBI)) { 2142 if (SequenceMBBI->getOperand(1).getReg() != LHS || 2143 SequenceMBBI->getOperand(2).getReg() != RHS || 2144 SequenceMBBI->getOperand(3).getImm() != CC || 2145 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 2146 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 2147 break; 2148 LastSelectPseudo = &*SequenceMBBI; 2149 SequenceMBBI->collectDebugValues(SelectDebugValues); 2150 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 2151 } else { 2152 if (SequenceMBBI->hasUnmodeledSideEffects() || 2153 SequenceMBBI->mayLoadOrStore()) 2154 break; 2155 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 2156 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 2157 })) 2158 break; 2159 } 2160 } 2161 2162 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 2163 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 2164 DebugLoc DL = MI.getDebugLoc(); 2165 MachineFunction::iterator I = ++BB->getIterator(); 2166 2167 MachineBasicBlock *HeadMBB = BB; 2168 MachineFunction *F = BB->getParent(); 2169 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 2170 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 2171 2172 F->insert(I, IfFalseMBB); 2173 F->insert(I, TailMBB); 2174 2175 // Transfer debug instructions associated with the selects to TailMBB. 2176 for (MachineInstr *DebugInstr : SelectDebugValues) { 2177 TailMBB->push_back(DebugInstr->removeFromParent()); 2178 } 2179 2180 // Move all instructions after the sequence to TailMBB. 2181 TailMBB->splice(TailMBB->end(), HeadMBB, 2182 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 2183 // Update machine-CFG edges by transferring all successors of the current 2184 // block to the new block which will contain the Phi nodes for the selects. 2185 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 2186 // Set the successors for HeadMBB. 2187 HeadMBB->addSuccessor(IfFalseMBB); 2188 HeadMBB->addSuccessor(TailMBB); 2189 2190 // Insert appropriate branch. 2191 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 2192 2193 BuildMI(HeadMBB, DL, TII.get(Opcode)) 2194 .addReg(LHS) 2195 .addReg(RHS) 2196 .addMBB(TailMBB); 2197 2198 // IfFalseMBB just falls through to TailMBB. 2199 IfFalseMBB->addSuccessor(TailMBB); 2200 2201 // Create PHIs for all of the select pseudo-instructions. 
2202 auto SelectMBBI = MI.getIterator();
2203 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
2204 auto InsertionPoint = TailMBB->begin();
2205 while (SelectMBBI != SelectEnd) {
2206 auto Next = std::next(SelectMBBI);
2207 if (isSelectPseudo(*SelectMBBI)) {
2208 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
2209 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
2210 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
2211 .addReg(SelectMBBI->getOperand(4).getReg())
2212 .addMBB(HeadMBB)
2213 .addReg(SelectMBBI->getOperand(5).getReg())
2214 .addMBB(IfFalseMBB);
2215 SelectMBBI->eraseFromParent();
2216 }
2217 SelectMBBI = Next;
2218 }
2219
2220 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
2221 return TailMBB;
2222 }
2223
2224 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
2225 int VLIndex, unsigned SEWIndex,
2226 RISCVVLMUL VLMul, bool WritesElement0) {
2227 MachineFunction &MF = *BB->getParent();
2228 DebugLoc DL = MI.getDebugLoc();
2229 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2230
2231 unsigned SEW = MI.getOperand(SEWIndex).getImm();
2232 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
2233 RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
2234
2235 MachineRegisterInfo &MRI = MF.getRegInfo();
2236
2237 // VL and VTYPE are alive here.
2238 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
2239
2240 if (VLIndex >= 0) {
2241 // Set VL (rs1 != X0).
2242 Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
2243 MIB.addReg(DestReg, RegState::Define | RegState::Dead)
2244 .addReg(MI.getOperand(VLIndex).getReg());
2245 } else
2246 // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
2247 MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
2248 .addReg(RISCV::X0, RegState::Kill);
2249
2250 // Default to tail agnostic unless the destination is tied to a source. In
2251 // that case the user would have some control over the tail values. The tail
2252 // policy is also ignored on instructions that only update element 0 like
2253 // vmv.s.x or reductions, so use agnostic there to match the common case.
2254 // FIXME: This is conservatively correct, but we might want to detect that
2255 // the input is undefined.
2256 bool TailAgnostic = true;
2257 if (MI.isRegTiedToUseOperand(0) && !WritesElement0)
2258 TailAgnostic = false;
2259
2260 // For simplicity we reuse the vtype representation here.
2261 MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
2262 /*TailAgnostic*/ TailAgnostic,
2263 /*MaskAgnostic*/ false));
2264
2265 // Remove (now) redundant operands from the pseudo.
2266 MI.getOperand(SEWIndex).setImm(-1);
2267 if (VLIndex >= 0) {
2268 MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
2269 MI.getOperand(VLIndex).setIsKill(false);
2270 }
2271
2272 return BB;
2273 }
2274
2275 MachineBasicBlock *
2276 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2277 MachineBasicBlock *BB) const {
2278 uint64_t TSFlags = MI.getDesc().TSFlags;
2279
2280 if (TSFlags & RISCVII::HasSEWOpMask) {
2281 unsigned NumOperands = MI.getNumExplicitOperands();
2282 int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ?
NumOperands - 2 : -1;
2283 unsigned SEWIndex = NumOperands - 1;
2284 bool WritesElement0 = TSFlags & RISCVII::WritesElement0Mask;
2285
2286 RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
2287 RISCVII::VLMulShift);
2288 return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, WritesElement0);
2289 }
2290
2291 switch (MI.getOpcode()) {
2292 default:
2293 llvm_unreachable("Unexpected instr type to insert");
2294 case RISCV::ReadCycleWide:
2295 assert(!Subtarget.is64Bit() &&
2296 "ReadCycleWide is only to be used on riscv32");
2297 return emitReadCycleWidePseudo(MI, BB);
2298 case RISCV::Select_GPR_Using_CC_GPR:
2299 case RISCV::Select_FPR16_Using_CC_GPR:
2300 case RISCV::Select_FPR32_Using_CC_GPR:
2301 case RISCV::Select_FPR64_Using_CC_GPR:
2302 return emitSelectPseudo(MI, BB);
2303 case RISCV::BuildPairF64Pseudo:
2304 return emitBuildPairF64Pseudo(MI, BB);
2305 case RISCV::SplitF64Pseudo:
2306 return emitSplitF64Pseudo(MI, BB);
2307 }
2308 }
2309
2310 // Calling Convention Implementation.
2311 // The expectations for frontend ABI lowering vary from target to target.
2312 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
2313 // details, but this is a longer term goal. For now, we simply try to keep the
2314 // role of the frontend as simple and well-defined as possible. The rules can
2315 // be summarised as:
2316 // * Never split up large scalar arguments. We handle them here.
2317 // * If a hardfloat calling convention is being used, and the struct may be
2318 // passed in a pair of registers (fp+fp, int+fp), and both registers are
2319 // available, then pass as two separate arguments. If either the GPRs or FPRs
2320 // are exhausted, then pass according to the rule below.
2321 // * If a struct could never be passed in registers or directly in a stack
2322 // slot (as it is larger than 2*XLEN and the floating point rules don't
2323 // apply), then pass it using a pointer with the byval attribute.
2324 // * If a struct is less than 2*XLEN, then coerce to either a two-element
2325 // word-sized array or a 2*XLEN scalar (depending on alignment).
2326 // * The frontend can determine whether a struct is returned by reference or
2327 // not based on its size and fields. If it will be returned by reference, the
2328 // frontend must modify the prototype so a pointer with the sret annotation is
2329 // passed as the first argument. This is not necessary for large scalar
2330 // returns.
2331 // * Struct return values and varargs should be coerced to structs containing
2332 // register-size fields in the same situations they would be for fixed
2333 // arguments.
2334
2335 static const MCPhysReg ArgGPRs[] = {
2336 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
2337 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
2338 };
2339 static const MCPhysReg ArgFPR16s[] = {
2340 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
2341 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
2342 };
2343 static const MCPhysReg ArgFPR32s[] = {
2344 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
2345 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
2346 };
2347 static const MCPhysReg ArgFPR64s[] = {
2348 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
2349 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
2350 };
2351 // This is an interim calling convention and it may be changed in the future.
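// Scalable vector arguments are currently passed in v16-v23 (or the
// overlapping m2/m4/m8 groups of those registers), with v0 used for the
// first mask argument; see CC_RISCV below.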
2352 static const MCPhysReg ArgVRs[] = {
2353 RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, RISCV::V20,
2354 RISCV::V21, RISCV::V22, RISCV::V23
2355 };
2356 static const MCPhysReg ArgVRM2s[] = {
2357 RISCV::V16M2, RISCV::V18M2, RISCV::V20M2, RISCV::V22M2
2358 };
2359 static const MCPhysReg ArgVRM4s[] = {RISCV::V16M4, RISCV::V20M4};
2360 static const MCPhysReg ArgVRM8s[] = {RISCV::V16M8};
2361
2362 // Pass a 2*XLEN argument that has been split into two XLEN values through
2363 // registers or the stack as necessary.
2364 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
2365 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
2366 MVT ValVT2, MVT LocVT2,
2367 ISD::ArgFlagsTy ArgFlags2) {
2368 unsigned XLenInBytes = XLen / 8;
2369 if (Register Reg = State.AllocateReg(ArgGPRs)) {
2370 // At least one half can be passed via register.
2371 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
2372 VA1.getLocVT(), CCValAssign::Full));
2373 } else {
2374 // Both halves must be passed on the stack, with proper alignment.
2375 Align StackAlign =
2376 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
2377 State.addLoc(
2378 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
2379 State.AllocateStack(XLenInBytes, StackAlign),
2380 VA1.getLocVT(), CCValAssign::Full));
2381 State.addLoc(CCValAssign::getMem(
2382 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2383 LocVT2, CCValAssign::Full));
2384 return false;
2385 }
2386
2387 if (Register Reg = State.AllocateReg(ArgGPRs)) {
2388 // The second half can also be passed via register.
2389 State.addLoc(
2390 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
2391 } else {
2392 // The second half is passed via the stack, without additional alignment.
2393 State.addLoc(CCValAssign::getMem(
2394 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
2395 LocVT2, CCValAssign::Full));
2396 }
2397
2398 return false;
2399 }
2400
2401 // Implements the RISC-V calling convention. Returns true upon failure.
2402 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
2403 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
2404 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
2405 bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
2406 Optional<unsigned> FirstMaskArgument) {
2407 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
2408 assert(XLen == 32 || XLen == 64);
2409 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
2410
2411 // Any return value split into more than two values can't be returned
2412 // directly.
2413 if (IsRet && ValNo > 1)
2414 return true;
2415
2416 // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
2417 // passing a variadic argument, or if no F16/F32 argument registers are available.
2418 bool UseGPRForF16_F32 = true;
2419 // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
2420 // passing a variadic argument, or if no F64 argument registers are available.
2421 bool UseGPRForF64 = true;
2422
2423 switch (ABI) {
2424 default:
2425 llvm_unreachable("Unexpected ABI");
2426 case RISCVABI::ABI_ILP32:
2427 case RISCVABI::ABI_LP64:
2428 break;
2429 case RISCVABI::ABI_ILP32F:
2430 case RISCVABI::ABI_LP64F:
2431 UseGPRForF16_F32 = !IsFixed;
2432 break;
2433 case RISCVABI::ABI_ILP32D:
2434 case RISCVABI::ABI_LP64D:
2435 UseGPRForF16_F32 = !IsFixed;
2436 UseGPRForF64 = !IsFixed;
2437 break;
2438 }
2439
2440 // FPR16, FPR32, and FPR64 alias each other.
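// Checking ArgFPR32s therefore suffices: once fa0-fa7 are exhausted for
// f32 values, they are equally exhausted for f16 and f64 values.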
2441 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
2442 UseGPRForF16_F32 = true;
2443 UseGPRForF64 = true;
2444 }
2445
2446 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
2447 // similar local variables rather than directly checking against the target
2448 // ABI.
2449
2450 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
2451 LocVT = XLenVT;
2452 LocInfo = CCValAssign::BCvt;
2453 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
2454 LocVT = MVT::i64;
2455 LocInfo = CCValAssign::BCvt;
2456 }
2457
2458 // If this is a variadic argument, the RISC-V calling convention requires
2459 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
2460 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
2461 // be used regardless of whether the original argument was split during
2462 // legalisation or not. The argument will not be passed by registers if the
2463 // original type is larger than 2*XLEN, so the register alignment rule does
2464 // not apply.
2465 unsigned TwoXLenInBytes = (2 * XLen) / 8;
2466 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
2467 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
2468 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
2469 // Skip 'odd' register if necessary.
2470 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
2471 State.AllocateReg(ArgGPRs);
2472 }
2473
2474 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
2475 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
2476 State.getPendingArgFlags();
2477
2478 assert(PendingLocs.size() == PendingArgFlags.size() &&
2479 "PendingLocs and PendingArgFlags out of sync");
2480
2481 // Handle passing f64 on RV32D with a soft float ABI or when floating point
2482 // registers are exhausted.
2483 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
2484 assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
2485 "Can't lower f64 if it is split");
2486 // Depending on available argument GPRs, f64 may be passed in a pair of
2487 // GPRs, split between a GPR and the stack, or passed completely on the
2488 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
2489 // cases.
2490 Register Reg = State.AllocateReg(ArgGPRs);
2491 LocVT = MVT::i32;
2492 if (!Reg) {
2493 unsigned StackOffset = State.AllocateStack(8, Align(8));
2494 State.addLoc(
2495 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2496 return false;
2497 }
2498 if (!State.AllocateReg(ArgGPRs))
2499 State.AllocateStack(4, Align(4));
2500 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2501 return false;
2502 }
2503
2504 // Split arguments might be passed indirectly, so keep track of the pending
2505 // values.
2506 if (ArgFlags.isSplit() || !PendingLocs.empty()) {
2507 LocVT = XLenVT;
2508 LocInfo = CCValAssign::Indirect;
2509 PendingLocs.push_back(
2510 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
2511 PendingArgFlags.push_back(ArgFlags);
2512 if (!ArgFlags.isSplitEnd()) {
2513 return false;
2514 }
2515 }
2516
2517 // If the split argument only had two elements, it should be passed directly
2518 // in registers or on the stack.
2519 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
2520 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
2521 // Apply the normal calling convention rules to the first half of the
2522 // split argument.
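// For example, an i64 argument on RV32 arrives here as two pending i32
// halves, which the helper assigns to consecutive GPRs or stack slots.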
2523 CCValAssign VA = PendingLocs[0];
2524 ISD::ArgFlagsTy AF = PendingArgFlags[0];
2525 PendingLocs.clear();
2526 PendingArgFlags.clear();
2527 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
2528 ArgFlags);
2529 }
2530
2531 // Allocate to a register if possible, or else a stack slot.
2532 Register Reg;
2533 if (ValVT == MVT::f16 && !UseGPRForF16_F32)
2534 Reg = State.AllocateReg(ArgFPR16s);
2535 else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
2536 Reg = State.AllocateReg(ArgFPR32s);
2537 else if (ValVT == MVT::f64 && !UseGPRForF64)
2538 Reg = State.AllocateReg(ArgFPR64s);
2539 else if (ValVT.isScalableVector()) {
2540 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
2541 if (RC == &RISCV::VRRegClass) {
2542 // Assign the first mask argument to V0.
2543 // This is an interim calling convention and it may be changed in the
2544 // future.
2545 if (FirstMaskArgument.hasValue() &&
2546 ValNo == FirstMaskArgument.getValue()) {
2547 Reg = State.AllocateReg(RISCV::V0);
2548 } else {
2549 Reg = State.AllocateReg(ArgVRs);
2550 }
2551 } else if (RC == &RISCV::VRM2RegClass) {
2552 Reg = State.AllocateReg(ArgVRM2s);
2553 } else if (RC == &RISCV::VRM4RegClass) {
2554 Reg = State.AllocateReg(ArgVRM4s);
2555 } else if (RC == &RISCV::VRM8RegClass) {
2556 Reg = State.AllocateReg(ArgVRM8s);
2557 } else {
2558 llvm_unreachable("Unhandled register class for ValueType");
2559 }
2560 if (!Reg) {
2561 LocInfo = CCValAssign::Indirect;
2562 // Try using a GPR to pass the address.
2563 Reg = State.AllocateReg(ArgGPRs);
2564 LocVT = XLenVT;
2565 }
2566 } else
2567 Reg = State.AllocateReg(ArgGPRs);
2568 unsigned StackOffset =
2569 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
2570
2571 // If we reach this point and PendingLocs is non-empty, we must be at the
2572 // end of a split argument that must be passed indirectly.
2573 if (!PendingLocs.empty()) {
2574 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
2575 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
2576
2577 for (auto &It : PendingLocs) {
2578 if (Reg)
2579 It.convertToReg(Reg);
2580 else
2581 It.convertToMem(StackOffset);
2582 State.addLoc(It);
2583 }
2584 PendingLocs.clear();
2585 PendingArgFlags.clear();
2586 return false;
2587 }
2588
2589 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
2590 (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) &&
2591 "Expected an XLenVT or scalable vector type at this stage");
2592
2593 if (Reg) {
2594 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2595 return false;
2596 }
2597
2598 // When a floating-point value is passed on the stack, no bit-conversion is
2599 // needed.
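// (The value is stored to and reloaded from memory with its own type, so
// the integer LocVT chosen for register passing no longer applies.)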
2600 if (ValVT.isFloatingPoint()) { 2601 LocVT = ValVT; 2602 LocInfo = CCValAssign::Full; 2603 } 2604 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 2605 return false; 2606 } 2607 2608 template <typename ArgTy> 2609 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 2610 for (const auto &ArgIdx : enumerate(Args)) { 2611 MVT ArgVT = ArgIdx.value().VT; 2612 if (ArgVT.isScalableVector() && 2613 ArgVT.getVectorElementType().SimpleTy == MVT::i1) 2614 return ArgIdx.index(); 2615 } 2616 return None; 2617 } 2618 2619 void RISCVTargetLowering::analyzeInputArgs( 2620 MachineFunction &MF, CCState &CCInfo, 2621 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 2622 unsigned NumArgs = Ins.size(); 2623 FunctionType *FType = MF.getFunction().getFunctionType(); 2624 2625 Optional<unsigned> FirstMaskArgument; 2626 if (Subtarget.hasStdExtV()) 2627 FirstMaskArgument = preAssignMask(Ins); 2628 2629 for (unsigned i = 0; i != NumArgs; ++i) { 2630 MVT ArgVT = Ins[i].VT; 2631 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 2632 2633 Type *ArgTy = nullptr; 2634 if (IsRet) 2635 ArgTy = FType->getReturnType(); 2636 else if (Ins[i].isOrigArg()) 2637 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 2638 2639 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2640 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 2641 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 2642 FirstMaskArgument)) { 2643 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 2644 << EVT(ArgVT).getEVTString() << '\n'); 2645 llvm_unreachable(nullptr); 2646 } 2647 } 2648 } 2649 2650 void RISCVTargetLowering::analyzeOutputArgs( 2651 MachineFunction &MF, CCState &CCInfo, 2652 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 2653 CallLoweringInfo *CLI) const { 2654 unsigned NumArgs = Outs.size(); 2655 2656 Optional<unsigned> FirstMaskArgument; 2657 if (Subtarget.hasStdExtV()) 2658 FirstMaskArgument = preAssignMask(Outs); 2659 2660 for (unsigned i = 0; i != NumArgs; i++) { 2661 MVT ArgVT = Outs[i].VT; 2662 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 2663 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 2664 2665 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 2666 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 2667 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 2668 FirstMaskArgument)) { 2669 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 2670 << EVT(ArgVT).getEVTString() << "\n"); 2671 llvm_unreachable(nullptr); 2672 } 2673 } 2674 } 2675 2676 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 2677 // values. 2678 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 2679 const CCValAssign &VA, const SDLoc &DL) { 2680 switch (VA.getLocInfo()) { 2681 default: 2682 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2683 case CCValAssign::Full: 2684 break; 2685 case CCValAssign::BCvt: 2686 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 2687 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 2688 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 2689 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 2690 else 2691 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 2692 break; 2693 } 2694 return Val; 2695 } 2696 2697 // The caller is responsible for loading the full value if the argument is 2698 // passed with CCValAssign::Indirect. 
2699 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 2700 const CCValAssign &VA, const SDLoc &DL, 2701 const RISCVTargetLowering &TLI) { 2702 MachineFunction &MF = DAG.getMachineFunction(); 2703 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 2704 EVT LocVT = VA.getLocVT(); 2705 SDValue Val; 2706 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 2707 Register VReg = RegInfo.createVirtualRegister(RC); 2708 RegInfo.addLiveIn(VA.getLocReg(), VReg); 2709 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 2710 2711 if (VA.getLocInfo() == CCValAssign::Indirect) 2712 return Val; 2713 2714 return convertLocVTToValVT(DAG, Val, VA, DL); 2715 } 2716 2717 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 2718 const CCValAssign &VA, const SDLoc &DL) { 2719 EVT LocVT = VA.getLocVT(); 2720 2721 switch (VA.getLocInfo()) { 2722 default: 2723 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2724 case CCValAssign::Full: 2725 break; 2726 case CCValAssign::BCvt: 2727 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 2728 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 2729 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 2730 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 2731 else 2732 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 2733 break; 2734 } 2735 return Val; 2736 } 2737 2738 // The caller is responsible for loading the full value if the argument is 2739 // passed with CCValAssign::Indirect. 2740 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 2741 const CCValAssign &VA, const SDLoc &DL) { 2742 MachineFunction &MF = DAG.getMachineFunction(); 2743 MachineFrameInfo &MFI = MF.getFrameInfo(); 2744 EVT LocVT = VA.getLocVT(); 2745 EVT ValVT = VA.getValVT(); 2746 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 2747 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, 2748 VA.getLocMemOffset(), /*Immutable=*/true); 2749 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2750 SDValue Val; 2751 2752 ISD::LoadExtType ExtType; 2753 switch (VA.getLocInfo()) { 2754 default: 2755 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 2756 case CCValAssign::Full: 2757 case CCValAssign::Indirect: 2758 case CCValAssign::BCvt: 2759 ExtType = ISD::NON_EXTLOAD; 2760 break; 2761 } 2762 Val = DAG.getExtLoad( 2763 ExtType, DL, LocVT, Chain, FIN, 2764 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 2765 return Val; 2766 } 2767 2768 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 2769 const CCValAssign &VA, const SDLoc &DL) { 2770 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 2771 "Unexpected VA"); 2772 MachineFunction &MF = DAG.getMachineFunction(); 2773 MachineFrameInfo &MFI = MF.getFrameInfo(); 2774 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 2775 2776 if (VA.isMemLoc()) { 2777 // f64 is passed on the stack. 
2778 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
2779 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2780 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
2781 MachinePointerInfo::getFixedStack(MF, FI));
2782 }
2783
2784 assert(VA.isRegLoc() && "Expected register VA assignment");
2785
2786 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2787 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
2788 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
2789 SDValue Hi;
2790 if (VA.getLocReg() == RISCV::X17) {
2791 // Second half of f64 is passed on the stack.
2792 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
2793 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2794 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
2795 MachinePointerInfo::getFixedStack(MF, FI));
2796 } else {
2797 // Second half of f64 is passed in another GPR.
2798 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2799 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
2800 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
2801 }
2802 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
2803 }
2804
2805 // FastCC gives less than a 1% performance improvement on some particular
2806 // benchmarks, but theoretically it may have a benefit in some cases.
2807 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
2808 CCValAssign::LocInfo LocInfo,
2809 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2810
2811 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2812 // X5 and X6 might be used for the save-restore libcalls.
2813 static const MCPhysReg GPRList[] = {
2814 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
2815 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
2816 RISCV::X29, RISCV::X30, RISCV::X31};
2817 if (unsigned Reg = State.AllocateReg(GPRList)) {
2818 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2819 return false;
2820 }
2821 }
2822
2823 if (LocVT == MVT::f16) {
2824 static const MCPhysReg FPR16List[] = {
2825 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
2826 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
2827 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
2828 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
2829 if (unsigned Reg = State.AllocateReg(FPR16List)) {
2830 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2831 return false;
2832 }
2833 }
2834
2835 if (LocVT == MVT::f32) {
2836 static const MCPhysReg FPR32List[] = {
2837 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
2838 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
2839 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
2840 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
2841 if (unsigned Reg = State.AllocateReg(FPR32List)) {
2842 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2843 return false;
2844 }
2845 }
2846
2847 if (LocVT == MVT::f64) {
2848 static const MCPhysReg FPR64List[] = {
2849 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
2850 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
2851 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
2852 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
2853 if (unsigned Reg = State.AllocateReg(FPR64List)) {
2854 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2855 return
false; 2856 } 2857 } 2858 2859 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 2860 unsigned Offset4 = State.AllocateStack(4, Align(4)); 2861 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 2862 return false; 2863 } 2864 2865 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 2866 unsigned Offset5 = State.AllocateStack(8, Align(8)); 2867 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 2868 return false; 2869 } 2870 2871 return true; // CC didn't match. 2872 } 2873 2874 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 2875 CCValAssign::LocInfo LocInfo, 2876 ISD::ArgFlagsTy ArgFlags, CCState &State) { 2877 2878 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 2879 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 2880 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 2881 static const MCPhysReg GPRList[] = { 2882 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 2883 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 2884 if (unsigned Reg = State.AllocateReg(GPRList)) { 2885 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2886 return false; 2887 } 2888 } 2889 2890 if (LocVT == MVT::f32) { 2891 // Pass in STG registers: F1, ..., F6 2892 // fs0 ... fs5 2893 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, 2894 RISCV::F18_F, RISCV::F19_F, 2895 RISCV::F20_F, RISCV::F21_F}; 2896 if (unsigned Reg = State.AllocateReg(FPR32List)) { 2897 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2898 return false; 2899 } 2900 } 2901 2902 if (LocVT == MVT::f64) { 2903 // Pass in STG registers: D1, ..., D6 2904 // fs6 ... fs11 2905 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, 2906 RISCV::F24_D, RISCV::F25_D, 2907 RISCV::F26_D, RISCV::F27_D}; 2908 if (unsigned Reg = State.AllocateReg(FPR64List)) { 2909 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 2910 return false; 2911 } 2912 } 2913 2914 report_fatal_error("No registers left in GHC calling convention"); 2915 return true; 2916 } 2917 2918 // Transform physical registers into virtual registers. 
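// Incoming arguments are assigned locations by the calling convention
// routines above and are then either copied out of their argument
// registers or loaded from their fixed stack slots.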
2919 SDValue RISCVTargetLowering::LowerFormalArguments(
2920 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
2921 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2922 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2923
2924 MachineFunction &MF = DAG.getMachineFunction();
2925
2926 switch (CallConv) {
2927 default:
2928 report_fatal_error("Unsupported calling convention");
2929 case CallingConv::C:
2930 case CallingConv::Fast:
2931 break;
2932 case CallingConv::GHC:
2933 if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
2934 !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
2935 report_fatal_error(
2936 "GHC calling convention requires the F and D instruction set extensions");
2937 }
2938
2939 const Function &Func = MF.getFunction();
2940 if (Func.hasFnAttribute("interrupt")) {
2941 if (!Func.arg_empty())
2942 report_fatal_error(
2943 "Functions with the interrupt attribute cannot have arguments!");
2944
2945 StringRef Kind =
2946 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2947
2948 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
2949 report_fatal_error(
2950 "Function interrupt attribute argument not supported!");
2951 }
2952
2953 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2954 MVT XLenVT = Subtarget.getXLenVT();
2955 unsigned XLenInBytes = Subtarget.getXLen() / 8;
2956 // Used with varargs to accumulate store chains.
2957 std::vector<SDValue> OutChains;
2958
2959 // Assign locations to all of the incoming arguments.
2960 SmallVector<CCValAssign, 16> ArgLocs;
2961 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2962
2963 if (CallConv == CallingConv::Fast)
2964 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2965 else if (CallConv == CallingConv::GHC)
2966 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
2967 else
2968 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2969
2970 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2971 CCValAssign &VA = ArgLocs[i];
2972 SDValue ArgValue;
2973 // Passing f64 on RV32D with a soft float ABI must be handled as a special
2974 // case.
2975 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2976 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2977 else if (VA.isRegLoc())
2978 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
2979 else
2980 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2981
2982 if (VA.getLocInfo() == CCValAssign::Indirect) {
2983 // If the original argument was split and passed by reference (e.g. i128
2984 // on RV32), we need to load all parts of it here (using the same
2985 // address).
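// For example, an i128 on RV32 typically reaches this point as four i32
// parts, loaded from offsets 0, 4, 8 and 12 off the incoming pointer.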
2986       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2987                                    MachinePointerInfo()));
2988       unsigned ArgIndex = Ins[i].OrigArgIndex;
2989       assert(Ins[i].PartOffset == 0);
2990       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2991         CCValAssign &PartVA = ArgLocs[i + 1];
2992         unsigned PartOffset = Ins[i + 1].PartOffset;
2993         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2994                                       DAG.getIntPtrConstant(PartOffset, DL));
2995         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2996                                      MachinePointerInfo()));
2997         ++i;
2998       }
2999       continue;
3000     }
3001     InVals.push_back(ArgValue);
3002   }
3003 
3004   if (IsVarArg) {
3005     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
3006     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
3007     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
3008     MachineFrameInfo &MFI = MF.getFrameInfo();
3009     MachineRegisterInfo &RegInfo = MF.getRegInfo();
3010     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
3011 
3012     // Offset of the first variable argument from stack pointer, and size of
3013     // the vararg save area. For now, the varargs save area is either zero or
3014     // large enough to hold a0-a7.
3015     int VaArgOffset, VarArgsSaveSize;
3016 
3017     // If all registers are allocated, then all varargs must be passed on the
3018     // stack and we don't need to save any argregs.
3019     if (ArgRegs.size() == Idx) {
3020       VaArgOffset = CCInfo.getNextStackOffset();
3021       VarArgsSaveSize = 0;
3022     } else {
3023       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
3024       VaArgOffset = -VarArgsSaveSize;
3025     }
3026 
3027     // Record the frame index of the first variable argument, which is needed
3028     // by VASTART.
3029     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
3030     RVFI->setVarArgsFrameIndex(FI);
3031 
3032     // If saving an odd number of registers then create an extra stack slot to
3033     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
3034     // offsets to even-numbered registers remain 2*XLEN-aligned.
3035     if (Idx % 2) {
3036       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
3037       VarArgsSaveSize += XLenInBytes;
3038     }
3039 
3040     // Copy the integer registers that may have been used for passing varargs
3041     // to the vararg save area.
3042     for (unsigned I = Idx; I < ArgRegs.size();
3043          ++I, VaArgOffset += XLenInBytes) {
3044       const Register Reg = RegInfo.createVirtualRegister(RC);
3045       RegInfo.addLiveIn(ArgRegs[I], Reg);
3046       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
3047       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
3048       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3049       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
3050                                    MachinePointerInfo::getFixedStack(MF, FI));
3051       cast<StoreSDNode>(Store.getNode())
3052           ->getMemOperand()
3053           ->setValue((Value *)nullptr);
3054       OutChains.push_back(Store);
3055     }
3056     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
3057   }
3058 
3059   // All stores are grouped in one node to allow the matching between
3060   // the size of Ins and InVals. This only happens for vararg functions.
3061   if (!OutChains.empty()) {
3062     OutChains.push_back(Chain);
3063     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3064   }
3065 
3066   return Chain;
3067 }
3068 
3069 /// isEligibleForTailCallOptimization - Check whether the call is eligible
3070 /// for tail call optimization.
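/// The checks below reject calls made from interrupt handlers, calls that
/// pass arguments on the stack or indirectly (including byval), calls where
/// either side uses sret, calls to externally-weak callees, and callees
/// whose register-preservation mask does not cover the caller's.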
3071 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
3072 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
3073     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
3074     const SmallVector<CCValAssign, 16> &ArgLocs) const {
3075 
3076   auto &Callee = CLI.Callee;
3077   auto CalleeCC = CLI.CallConv;
3078   auto &Outs = CLI.Outs;
3079   auto &Caller = MF.getFunction();
3080   auto CallerCC = Caller.getCallingConv();
3081 
3082   // Exception-handling functions need a special set of instructions to
3083   // indicate a return to the hardware. Tail-calling another function would
3084   // probably break this.
3085   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
3086   // should be expanded as new function attributes are introduced.
3087   if (Caller.hasFnAttribute("interrupt"))
3088     return false;
3089 
3090   // Do not tail call opt if the stack is used to pass parameters.
3091   if (CCInfo.getNextStackOffset() != 0)
3092     return false;
3093 
3094   // Do not tail call opt if any parameters need to be passed indirectly.
3095   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
3096   // passed indirectly. So the address of the value will be passed in a
3097   // register, or if not available, then the address is put on the stack.
3098   // Passing indirectly often requires allocating stack space to hold the
3099   // value, so the CCInfo.getNextStackOffset() != 0 check above is not enough;
3100   // we must also check whether any CCValAssign in ArgLocs is passed
3101   // CCValAssign::Indirect.
3102   for (auto &VA : ArgLocs)
3103     if (VA.getLocInfo() == CCValAssign::Indirect)
3104       return false;
3105 
3106   // Do not tail call opt if either caller or callee uses struct return
3107   // semantics.
3108   auto IsCallerStructRet = Caller.hasStructRetAttr();
3109   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
3110   if (IsCallerStructRet || IsCalleeStructRet)
3111     return false;
3112 
3113   // Externally-defined functions with weak linkage should not be
3114   // tail-called. The behaviour of branch instructions in this situation (as
3115   // used for tail calls) is implementation-defined, so we cannot rely on the
3116   // linker replacing the tail call with a return.
3117   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3118     const GlobalValue *GV = G->getGlobal();
3119     if (GV->hasExternalWeakLinkage())
3120       return false;
3121   }
3122 
3123   // The callee has to preserve all registers the caller needs to preserve.
3124   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3125   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
3126   if (CalleeCC != CallerCC) {
3127     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
3128     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
3129       return false;
3130   }
3131 
3132   // Byval parameters hand the function a pointer directly into the stack area
3133   // we want to reuse during a tail call. Working around this *is* possible
3134   // but less efficient and uglier in LowerCall.
3135   for (auto &Arg : Outs)
3136     if (Arg.Flags.isByVal())
3137       return false;
3138 
3139   return true;
3140 }
3141 
3142 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
3143 // and output parameter nodes.
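// The emitted sequence is roughly: callseq_start, glued CopyToReg nodes for
// each register argument, a RISCVISD::CALL, then callseq_end followed by
// glued CopyFromReg nodes for the results; tail calls instead end at a single
// RISCVISD::TAIL node with no callseq markers.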
3144 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 3145 SmallVectorImpl<SDValue> &InVals) const { 3146 SelectionDAG &DAG = CLI.DAG; 3147 SDLoc &DL = CLI.DL; 3148 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 3149 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 3150 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 3151 SDValue Chain = CLI.Chain; 3152 SDValue Callee = CLI.Callee; 3153 bool &IsTailCall = CLI.IsTailCall; 3154 CallingConv::ID CallConv = CLI.CallConv; 3155 bool IsVarArg = CLI.IsVarArg; 3156 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 3157 MVT XLenVT = Subtarget.getXLenVT(); 3158 3159 MachineFunction &MF = DAG.getMachineFunction(); 3160 3161 // Analyze the operands of the call, assigning locations to each operand. 3162 SmallVector<CCValAssign, 16> ArgLocs; 3163 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 3164 3165 if (CallConv == CallingConv::Fast) 3166 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); 3167 else if (CallConv == CallingConv::GHC) 3168 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 3169 else 3170 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 3171 3172 // Check if it's really possible to do a tail call. 3173 if (IsTailCall) 3174 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 3175 3176 if (IsTailCall) 3177 ++NumTailCalls; 3178 else if (CLI.CB && CLI.CB->isMustTailCall()) 3179 report_fatal_error("failed to perform tail call elimination on a call " 3180 "site marked musttail"); 3181 3182 // Get a count of how many bytes are to be pushed on the stack. 3183 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 3184 3185 // Create local copies for byval args 3186 SmallVector<SDValue, 8> ByValArgs; 3187 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 3188 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3189 if (!Flags.isByVal()) 3190 continue; 3191 3192 SDValue Arg = OutVals[i]; 3193 unsigned Size = Flags.getByValSize(); 3194 Align Alignment = Flags.getNonZeroByValAlign(); 3195 3196 int FI = 3197 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 3198 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 3199 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 3200 3201 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 3202 /*IsVolatile=*/false, 3203 /*AlwaysInline=*/false, IsTailCall, 3204 MachinePointerInfo(), MachinePointerInfo()); 3205 ByValArgs.push_back(FIPtr); 3206 } 3207 3208 if (!IsTailCall) 3209 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 3210 3211 // Copy argument values to their designated locations. 3212 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 3213 SmallVector<SDValue, 8> MemOpChains; 3214 SDValue StackPtr; 3215 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 3216 CCValAssign &VA = ArgLocs[i]; 3217 SDValue ArgValue = OutVals[i]; 3218 ISD::ArgFlagsTy Flags = Outs[i].Flags; 3219 3220 // Handle passing f64 on RV32D with a soft float ABI as a special case. 3221 bool IsF64OnRV32DSoftABI = 3222 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 3223 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 3224 SDValue SplitF64 = DAG.getNode( 3225 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 3226 SDValue Lo = SplitF64.getValue(0); 3227 SDValue Hi = SplitF64.getValue(1); 3228 3229 Register RegLo = VA.getLocReg(); 3230 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 3231 3232 if (RegLo == RISCV::X17) { 3233 // Second half of f64 is passed on the stack. 
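        // X17 (a7) is the last GPR argument register, so only the low half
        // fits in a register; the store below places the high half in the
        // first stack argument slot, at offset zero from sp.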
3234 // Work out the address of the stack slot. 3235 if (!StackPtr.getNode()) 3236 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 3237 // Emit the store. 3238 MemOpChains.push_back( 3239 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 3240 } else { 3241 // Second half of f64 is passed in another GPR. 3242 assert(RegLo < RISCV::X31 && "Invalid register pair"); 3243 Register RegHigh = RegLo + 1; 3244 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 3245 } 3246 continue; 3247 } 3248 3249 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 3250 // as any other MemLoc. 3251 3252 // Promote the value if needed. 3253 // For now, only handle fully promoted and indirect arguments. 3254 if (VA.getLocInfo() == CCValAssign::Indirect) { 3255 // Store the argument in a stack slot and pass its address. 3256 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 3257 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 3258 MemOpChains.push_back( 3259 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 3260 MachinePointerInfo::getFixedStack(MF, FI))); 3261 // If the original argument was split (e.g. i128), we need 3262 // to store all parts of it here (and pass just one address). 3263 unsigned ArgIndex = Outs[i].OrigArgIndex; 3264 assert(Outs[i].PartOffset == 0); 3265 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 3266 SDValue PartValue = OutVals[i + 1]; 3267 unsigned PartOffset = Outs[i + 1].PartOffset; 3268 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 3269 DAG.getIntPtrConstant(PartOffset, DL)); 3270 MemOpChains.push_back( 3271 DAG.getStore(Chain, DL, PartValue, Address, 3272 MachinePointerInfo::getFixedStack(MF, FI))); 3273 ++i; 3274 } 3275 ArgValue = SpillSlot; 3276 } else { 3277 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL); 3278 } 3279 3280 // Use local copy if it is a byval arg. 3281 if (Flags.isByVal()) 3282 ArgValue = ByValArgs[j++]; 3283 3284 if (VA.isRegLoc()) { 3285 // Queue up the argument copies and emit them at the end. 3286 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 3287 } else { 3288 assert(VA.isMemLoc() && "Argument not register or memory"); 3289 assert(!IsTailCall && "Tail call not allowed if stack is used " 3290 "for passing parameters"); 3291 3292 // Work out the address of the stack slot. 3293 if (!StackPtr.getNode()) 3294 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 3295 SDValue Address = 3296 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 3297 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 3298 3299 // Emit the store. 3300 MemOpChains.push_back( 3301 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 3302 } 3303 } 3304 3305 // Join the stores, which are independent of one another. 3306 if (!MemOpChains.empty()) 3307 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 3308 3309 SDValue Glue; 3310 3311 // Build a sequence of copy-to-reg nodes, chained and glued together. 3312 for (auto &Reg : RegsToPass) { 3313 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 3314 Glue = Chain.getValue(1); 3315 } 3316 3317 // Validate that none of the argument registers have been marked as 3318 // reserved, if so report an error. Do the same for the return address if this 3319 // is not a tailcall. 
3320 validateCCReservedRegs(RegsToPass, MF); 3321 if (!IsTailCall && 3322 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 3323 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 3324 MF.getFunction(), 3325 "Return address register required, but has been reserved."}); 3326 3327 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 3328 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 3329 // split it and then direct call can be matched by PseudoCALL. 3330 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 3331 const GlobalValue *GV = S->getGlobal(); 3332 3333 unsigned OpFlags = RISCVII::MO_CALL; 3334 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 3335 OpFlags = RISCVII::MO_PLT; 3336 3337 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 3338 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 3339 unsigned OpFlags = RISCVII::MO_CALL; 3340 3341 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 3342 nullptr)) 3343 OpFlags = RISCVII::MO_PLT; 3344 3345 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 3346 } 3347 3348 // The first call operand is the chain and the second is the target address. 3349 SmallVector<SDValue, 8> Ops; 3350 Ops.push_back(Chain); 3351 Ops.push_back(Callee); 3352 3353 // Add argument registers to the end of the list so that they are 3354 // known live into the call. 3355 for (auto &Reg : RegsToPass) 3356 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 3357 3358 if (!IsTailCall) { 3359 // Add a register mask operand representing the call-preserved registers. 3360 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 3361 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 3362 assert(Mask && "Missing call preserved mask for calling convention"); 3363 Ops.push_back(DAG.getRegisterMask(Mask)); 3364 } 3365 3366 // Glue the call to the argument copies, if any. 3367 if (Glue.getNode()) 3368 Ops.push_back(Glue); 3369 3370 // Emit the call. 3371 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 3372 3373 if (IsTailCall) { 3374 MF.getFrameInfo().setHasTailCall(); 3375 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 3376 } 3377 3378 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 3379 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 3380 Glue = Chain.getValue(1); 3381 3382 // Mark the end of the call, which is glued to the call itself. 3383 Chain = DAG.getCALLSEQ_END(Chain, 3384 DAG.getConstant(NumBytes, DL, PtrVT, true), 3385 DAG.getConstant(0, DL, PtrVT, true), 3386 Glue, DL); 3387 Glue = Chain.getValue(1); 3388 3389 // Assign locations to each value returned by this call. 3390 SmallVector<CCValAssign, 16> RVLocs; 3391 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 3392 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 3393 3394 // Copy all of the result registers out of their specified physreg. 
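  // Each CopyFromReg below is glued to its predecessor so the result copies
  // stay immediately after the call and cannot be reordered away from it.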
3395 for (auto &VA : RVLocs) { 3396 // Copy the value out 3397 SDValue RetValue = 3398 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 3399 // Glue the RetValue to the end of the call sequence 3400 Chain = RetValue.getValue(1); 3401 Glue = RetValue.getValue(2); 3402 3403 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 3404 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 3405 SDValue RetValue2 = 3406 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 3407 Chain = RetValue2.getValue(1); 3408 Glue = RetValue2.getValue(2); 3409 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 3410 RetValue2); 3411 } 3412 3413 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); 3414 3415 InVals.push_back(RetValue); 3416 } 3417 3418 return Chain; 3419 } 3420 3421 bool RISCVTargetLowering::CanLowerReturn( 3422 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 3423 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 3424 SmallVector<CCValAssign, 16> RVLocs; 3425 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 3426 3427 Optional<unsigned> FirstMaskArgument; 3428 if (Subtarget.hasStdExtV()) 3429 FirstMaskArgument = preAssignMask(Outs); 3430 3431 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 3432 MVT VT = Outs[i].VT; 3433 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 3434 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 3435 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 3436 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 3437 *this, FirstMaskArgument)) 3438 return false; 3439 } 3440 return true; 3441 } 3442 3443 SDValue 3444 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 3445 bool IsVarArg, 3446 const SmallVectorImpl<ISD::OutputArg> &Outs, 3447 const SmallVectorImpl<SDValue> &OutVals, 3448 const SDLoc &DL, SelectionDAG &DAG) const { 3449 const MachineFunction &MF = DAG.getMachineFunction(); 3450 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 3451 3452 // Stores the assignment of the return value to a location. 3453 SmallVector<CCValAssign, 16> RVLocs; 3454 3455 // Info about the registers and stack slot. 3456 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 3457 *DAG.getContext()); 3458 3459 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 3460 nullptr); 3461 3462 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 3463 report_fatal_error("GHC functions return void only"); 3464 3465 SDValue Glue; 3466 SmallVector<SDValue, 4> RetOps(1, Chain); 3467 3468 // Copy the result values into the output registers. 3469 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 3470 SDValue Val = OutVals[i]; 3471 CCValAssign &VA = RVLocs[i]; 3472 assert(VA.isRegLoc() && "Can only return in registers!"); 3473 3474 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 3475 // Handle returning f64 on RV32D with a soft float ABI. 
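      // SplitF64 splits the f64 into two i32 halves, which are returned in an
      // adjacent GPR pair (e.g. a0 and a1).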
3476 assert(VA.isRegLoc() && "Expected return via registers"); 3477 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 3478 DAG.getVTList(MVT::i32, MVT::i32), Val); 3479 SDValue Lo = SplitF64.getValue(0); 3480 SDValue Hi = SplitF64.getValue(1); 3481 Register RegLo = VA.getLocReg(); 3482 assert(RegLo < RISCV::X31 && "Invalid register pair"); 3483 Register RegHi = RegLo + 1; 3484 3485 if (STI.isRegisterReservedByUser(RegLo) || 3486 STI.isRegisterReservedByUser(RegHi)) 3487 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 3488 MF.getFunction(), 3489 "Return value register required, but has been reserved."}); 3490 3491 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 3492 Glue = Chain.getValue(1); 3493 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 3494 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 3495 Glue = Chain.getValue(1); 3496 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 3497 } else { 3498 // Handle a 'normal' return. 3499 Val = convertValVTToLocVT(DAG, Val, VA, DL); 3500 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 3501 3502 if (STI.isRegisterReservedByUser(VA.getLocReg())) 3503 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 3504 MF.getFunction(), 3505 "Return value register required, but has been reserved."}); 3506 3507 // Guarantee that all emitted copies are stuck together. 3508 Glue = Chain.getValue(1); 3509 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 3510 } 3511 } 3512 3513 RetOps[0] = Chain; // Update chain. 3514 3515 // Add the glue node if we have it. 3516 if (Glue.getNode()) { 3517 RetOps.push_back(Glue); 3518 } 3519 3520 // Interrupt service routines use different return instructions. 3521 const Function &Func = DAG.getMachineFunction().getFunction(); 3522 if (Func.hasFnAttribute("interrupt")) { 3523 if (!Func.getReturnType()->isVoidTy()) 3524 report_fatal_error( 3525 "Functions with the interrupt attribute must have void return type!"); 3526 3527 MachineFunction &MF = DAG.getMachineFunction(); 3528 StringRef Kind = 3529 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 3530 3531 unsigned RetOpc; 3532 if (Kind == "user") 3533 RetOpc = RISCVISD::URET_FLAG; 3534 else if (Kind == "supervisor") 3535 RetOpc = RISCVISD::SRET_FLAG; 3536 else 3537 RetOpc = RISCVISD::MRET_FLAG; 3538 3539 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 3540 } 3541 3542 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 3543 } 3544 3545 void RISCVTargetLowering::validateCCReservedRegs( 3546 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 3547 MachineFunction &MF) const { 3548 const Function &F = MF.getFunction(); 3549 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 3550 3551 if (llvm::any_of(Regs, [&STI](auto Reg) { 3552 return STI.isRegisterReservedByUser(Reg.first); 3553 })) 3554 F.getContext().diagnose(DiagnosticInfoUnsupported{ 3555 F, "Argument register required, but has been reserved."}); 3556 } 3557 3558 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 3559 return CI->isTailCall(); 3560 } 3561 3562 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 3563 #define NODE_NAME_CASE(NODE) \ 3564 case RISCVISD::NODE: \ 3565 return "RISCVISD::" #NODE; 3566 // clang-format off 3567 switch ((RISCVISD::NodeType)Opcode) { 3568 case RISCVISD::FIRST_NUMBER: 3569 break; 3570 NODE_NAME_CASE(RET_FLAG) 3571 NODE_NAME_CASE(URET_FLAG) 3572 NODE_NAME_CASE(SRET_FLAG) 3573 
NODE_NAME_CASE(MRET_FLAG) 3574 NODE_NAME_CASE(CALL) 3575 NODE_NAME_CASE(SELECT_CC) 3576 NODE_NAME_CASE(BuildPairF64) 3577 NODE_NAME_CASE(SplitF64) 3578 NODE_NAME_CASE(TAIL) 3579 NODE_NAME_CASE(SLLW) 3580 NODE_NAME_CASE(SRAW) 3581 NODE_NAME_CASE(SRLW) 3582 NODE_NAME_CASE(DIVW) 3583 NODE_NAME_CASE(DIVUW) 3584 NODE_NAME_CASE(REMUW) 3585 NODE_NAME_CASE(ROLW) 3586 NODE_NAME_CASE(RORW) 3587 NODE_NAME_CASE(FSLW) 3588 NODE_NAME_CASE(FSRW) 3589 NODE_NAME_CASE(FMV_H_X) 3590 NODE_NAME_CASE(FMV_X_ANYEXTH) 3591 NODE_NAME_CASE(FMV_W_X_RV64) 3592 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 3593 NODE_NAME_CASE(READ_CYCLE_WIDE) 3594 NODE_NAME_CASE(GREVI) 3595 NODE_NAME_CASE(GREVIW) 3596 NODE_NAME_CASE(GORCI) 3597 NODE_NAME_CASE(GORCIW) 3598 NODE_NAME_CASE(VMV_X_S) 3599 NODE_NAME_CASE(SPLAT_VECTOR_I64) 3600 NODE_NAME_CASE(READ_VLENB) 3601 } 3602 // clang-format on 3603 return nullptr; 3604 #undef NODE_NAME_CASE 3605 } 3606 3607 /// getConstraintType - Given a constraint letter, return the type of 3608 /// constraint it is for this target. 3609 RISCVTargetLowering::ConstraintType 3610 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 3611 if (Constraint.size() == 1) { 3612 switch (Constraint[0]) { 3613 default: 3614 break; 3615 case 'f': 3616 return C_RegisterClass; 3617 case 'I': 3618 case 'J': 3619 case 'K': 3620 return C_Immediate; 3621 case 'A': 3622 return C_Memory; 3623 } 3624 } 3625 return TargetLowering::getConstraintType(Constraint); 3626 } 3627 3628 std::pair<unsigned, const TargetRegisterClass *> 3629 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 3630 StringRef Constraint, 3631 MVT VT) const { 3632 // First, see if this is a constraint that directly corresponds to a 3633 // RISCV register class. 3634 if (Constraint.size() == 1) { 3635 switch (Constraint[0]) { 3636 case 'r': 3637 return std::make_pair(0U, &RISCV::GPRRegClass); 3638 case 'f': 3639 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) 3640 return std::make_pair(0U, &RISCV::FPR16RegClass); 3641 if (Subtarget.hasStdExtF() && VT == MVT::f32) 3642 return std::make_pair(0U, &RISCV::FPR32RegClass); 3643 if (Subtarget.hasStdExtD() && VT == MVT::f64) 3644 return std::make_pair(0U, &RISCV::FPR64RegClass); 3645 break; 3646 default: 3647 break; 3648 } 3649 } 3650 3651 // Clang will correctly decode the usage of register name aliases into their 3652 // official names. However, other frontends like `rustc` do not. This allows 3653 // users of these frontends to use the ABI names for registers in LLVM-style 3654 // register constraints. 
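  // For example, "{a0}" resolves to RISCV::X10 below, whereas "{x10}" is
  // already matched by the generic TableGen-based lookup.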
3655 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) 3656 .Case("{zero}", RISCV::X0) 3657 .Case("{ra}", RISCV::X1) 3658 .Case("{sp}", RISCV::X2) 3659 .Case("{gp}", RISCV::X3) 3660 .Case("{tp}", RISCV::X4) 3661 .Case("{t0}", RISCV::X5) 3662 .Case("{t1}", RISCV::X6) 3663 .Case("{t2}", RISCV::X7) 3664 .Cases("{s0}", "{fp}", RISCV::X8) 3665 .Case("{s1}", RISCV::X9) 3666 .Case("{a0}", RISCV::X10) 3667 .Case("{a1}", RISCV::X11) 3668 .Case("{a2}", RISCV::X12) 3669 .Case("{a3}", RISCV::X13) 3670 .Case("{a4}", RISCV::X14) 3671 .Case("{a5}", RISCV::X15) 3672 .Case("{a6}", RISCV::X16) 3673 .Case("{a7}", RISCV::X17) 3674 .Case("{s2}", RISCV::X18) 3675 .Case("{s3}", RISCV::X19) 3676 .Case("{s4}", RISCV::X20) 3677 .Case("{s5}", RISCV::X21) 3678 .Case("{s6}", RISCV::X22) 3679 .Case("{s7}", RISCV::X23) 3680 .Case("{s8}", RISCV::X24) 3681 .Case("{s9}", RISCV::X25) 3682 .Case("{s10}", RISCV::X26) 3683 .Case("{s11}", RISCV::X27) 3684 .Case("{t3}", RISCV::X28) 3685 .Case("{t4}", RISCV::X29) 3686 .Case("{t5}", RISCV::X30) 3687 .Case("{t6}", RISCV::X31) 3688 .Default(RISCV::NoRegister); 3689 if (XRegFromAlias != RISCV::NoRegister) 3690 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 3691 3692 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 3693 // TableGen record rather than the AsmName to choose registers for InlineAsm 3694 // constraints, plus we want to match those names to the widest floating point 3695 // register type available, manually select floating point registers here. 3696 // 3697 // The second case is the ABI name of the register, so that frontends can also 3698 // use the ABI names in register constraint lists. 3699 if (Subtarget.hasStdExtF()) { 3700 unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) 3701 .Cases("{f0}", "{ft0}", RISCV::F0_F) 3702 .Cases("{f1}", "{ft1}", RISCV::F1_F) 3703 .Cases("{f2}", "{ft2}", RISCV::F2_F) 3704 .Cases("{f3}", "{ft3}", RISCV::F3_F) 3705 .Cases("{f4}", "{ft4}", RISCV::F4_F) 3706 .Cases("{f5}", "{ft5}", RISCV::F5_F) 3707 .Cases("{f6}", "{ft6}", RISCV::F6_F) 3708 .Cases("{f7}", "{ft7}", RISCV::F7_F) 3709 .Cases("{f8}", "{fs0}", RISCV::F8_F) 3710 .Cases("{f9}", "{fs1}", RISCV::F9_F) 3711 .Cases("{f10}", "{fa0}", RISCV::F10_F) 3712 .Cases("{f11}", "{fa1}", RISCV::F11_F) 3713 .Cases("{f12}", "{fa2}", RISCV::F12_F) 3714 .Cases("{f13}", "{fa3}", RISCV::F13_F) 3715 .Cases("{f14}", "{fa4}", RISCV::F14_F) 3716 .Cases("{f15}", "{fa5}", RISCV::F15_F) 3717 .Cases("{f16}", "{fa6}", RISCV::F16_F) 3718 .Cases("{f17}", "{fa7}", RISCV::F17_F) 3719 .Cases("{f18}", "{fs2}", RISCV::F18_F) 3720 .Cases("{f19}", "{fs3}", RISCV::F19_F) 3721 .Cases("{f20}", "{fs4}", RISCV::F20_F) 3722 .Cases("{f21}", "{fs5}", RISCV::F21_F) 3723 .Cases("{f22}", "{fs6}", RISCV::F22_F) 3724 .Cases("{f23}", "{fs7}", RISCV::F23_F) 3725 .Cases("{f24}", "{fs8}", RISCV::F24_F) 3726 .Cases("{f25}", "{fs9}", RISCV::F25_F) 3727 .Cases("{f26}", "{fs10}", RISCV::F26_F) 3728 .Cases("{f27}", "{fs11}", RISCV::F27_F) 3729 .Cases("{f28}", "{ft8}", RISCV::F28_F) 3730 .Cases("{f29}", "{ft9}", RISCV::F29_F) 3731 .Cases("{f30}", "{ft10}", RISCV::F30_F) 3732 .Cases("{f31}", "{ft11}", RISCV::F31_F) 3733 .Default(RISCV::NoRegister); 3734 if (FReg != RISCV::NoRegister) { 3735 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); 3736 if (Subtarget.hasStdExtD()) { 3737 unsigned RegNo = FReg - RISCV::F0_F; 3738 unsigned DReg = RISCV::F0_D + RegNo; 3739 return std::make_pair(DReg, &RISCV::FPR64RegClass); 3740 } 3741 return std::make_pair(FReg, 
&RISCV::FPR32RegClass); 3742 } 3743 } 3744 3745 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 3746 } 3747 3748 unsigned 3749 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 3750 // Currently only support length 1 constraints. 3751 if (ConstraintCode.size() == 1) { 3752 switch (ConstraintCode[0]) { 3753 case 'A': 3754 return InlineAsm::Constraint_A; 3755 default: 3756 break; 3757 } 3758 } 3759 3760 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 3761 } 3762 3763 void RISCVTargetLowering::LowerAsmOperandForConstraint( 3764 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 3765 SelectionDAG &DAG) const { 3766 // Currently only support length 1 constraints. 3767 if (Constraint.length() == 1) { 3768 switch (Constraint[0]) { 3769 case 'I': 3770 // Validate & create a 12-bit signed immediate operand. 3771 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3772 uint64_t CVal = C->getSExtValue(); 3773 if (isInt<12>(CVal)) 3774 Ops.push_back( 3775 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 3776 } 3777 return; 3778 case 'J': 3779 // Validate & create an integer zero operand. 3780 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 3781 if (C->getZExtValue() == 0) 3782 Ops.push_back( 3783 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 3784 return; 3785 case 'K': 3786 // Validate & create a 5-bit unsigned immediate operand. 3787 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 3788 uint64_t CVal = C->getZExtValue(); 3789 if (isUInt<5>(CVal)) 3790 Ops.push_back( 3791 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 3792 } 3793 return; 3794 default: 3795 break; 3796 } 3797 } 3798 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 3799 } 3800 3801 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 3802 Instruction *Inst, 3803 AtomicOrdering Ord) const { 3804 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 3805 return Builder.CreateFence(Ord); 3806 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 3807 return Builder.CreateFence(AtomicOrdering::Release); 3808 return nullptr; 3809 } 3810 3811 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 3812 Instruction *Inst, 3813 AtomicOrdering Ord) const { 3814 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 3815 return Builder.CreateFence(AtomicOrdering::Acquire); 3816 return nullptr; 3817 } 3818 3819 TargetLowering::AtomicExpansionKind 3820 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 3821 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 3822 // point operations can't be used in an lr/sc sequence without breaking the 3823 // forward-progress guarantee. 
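  // Subword (8- and 16-bit) operations are narrower than the natural lr/sc
  // width, so they are expanded to the masked intrinsics handled below.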
3824 if (AI->isFloatingPointOperation()) 3825 return AtomicExpansionKind::CmpXChg; 3826 3827 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 3828 if (Size == 8 || Size == 16) 3829 return AtomicExpansionKind::MaskedIntrinsic; 3830 return AtomicExpansionKind::None; 3831 } 3832 3833 static Intrinsic::ID 3834 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 3835 if (XLen == 32) { 3836 switch (BinOp) { 3837 default: 3838 llvm_unreachable("Unexpected AtomicRMW BinOp"); 3839 case AtomicRMWInst::Xchg: 3840 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 3841 case AtomicRMWInst::Add: 3842 return Intrinsic::riscv_masked_atomicrmw_add_i32; 3843 case AtomicRMWInst::Sub: 3844 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 3845 case AtomicRMWInst::Nand: 3846 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 3847 case AtomicRMWInst::Max: 3848 return Intrinsic::riscv_masked_atomicrmw_max_i32; 3849 case AtomicRMWInst::Min: 3850 return Intrinsic::riscv_masked_atomicrmw_min_i32; 3851 case AtomicRMWInst::UMax: 3852 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 3853 case AtomicRMWInst::UMin: 3854 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 3855 } 3856 } 3857 3858 if (XLen == 64) { 3859 switch (BinOp) { 3860 default: 3861 llvm_unreachable("Unexpected AtomicRMW BinOp"); 3862 case AtomicRMWInst::Xchg: 3863 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 3864 case AtomicRMWInst::Add: 3865 return Intrinsic::riscv_masked_atomicrmw_add_i64; 3866 case AtomicRMWInst::Sub: 3867 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 3868 case AtomicRMWInst::Nand: 3869 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 3870 case AtomicRMWInst::Max: 3871 return Intrinsic::riscv_masked_atomicrmw_max_i64; 3872 case AtomicRMWInst::Min: 3873 return Intrinsic::riscv_masked_atomicrmw_min_i64; 3874 case AtomicRMWInst::UMax: 3875 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 3876 case AtomicRMWInst::UMin: 3877 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 3878 } 3879 } 3880 3881 llvm_unreachable("Unexpected XLen\n"); 3882 } 3883 3884 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 3885 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 3886 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 3887 unsigned XLen = Subtarget.getXLen(); 3888 Value *Ordering = 3889 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 3890 Type *Tys[] = {AlignedAddr->getType()}; 3891 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 3892 AI->getModule(), 3893 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 3894 3895 if (XLen == 64) { 3896 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 3897 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 3898 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 3899 } 3900 3901 Value *Result; 3902 3903 // Must pass the shift amount needed to sign extend the loaded value prior 3904 // to performing a signed comparison for min/max. ShiftAmt is the number of 3905 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 3906 // is the number of bits to left+right shift the value in order to 3907 // sign-extend. 
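  // For example, an i8 field at byte offset 1 of the aligned word on RV32 has
  // ShiftAmt == 8 and ValWidth == 8, so SextShamt == 32 - 8 - 8 == 16;
  // shifting left and then arithmetically right by 16 sign-extends the field
  // in place.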
3908 if (AI->getOperation() == AtomicRMWInst::Min || 3909 AI->getOperation() == AtomicRMWInst::Max) { 3910 const DataLayout &DL = AI->getModule()->getDataLayout(); 3911 unsigned ValWidth = 3912 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 3913 Value *SextShamt = 3914 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 3915 Result = Builder.CreateCall(LrwOpScwLoop, 3916 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 3917 } else { 3918 Result = 3919 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 3920 } 3921 3922 if (XLen == 64) 3923 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 3924 return Result; 3925 } 3926 3927 TargetLowering::AtomicExpansionKind 3928 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 3929 AtomicCmpXchgInst *CI) const { 3930 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 3931 if (Size == 8 || Size == 16) 3932 return AtomicExpansionKind::MaskedIntrinsic; 3933 return AtomicExpansionKind::None; 3934 } 3935 3936 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 3937 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 3938 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 3939 unsigned XLen = Subtarget.getXLen(); 3940 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 3941 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 3942 if (XLen == 64) { 3943 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 3944 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 3945 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 3946 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 3947 } 3948 Type *Tys[] = {AlignedAddr->getType()}; 3949 Function *MaskedCmpXchg = 3950 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 3951 Value *Result = Builder.CreateCall( 3952 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 3953 if (XLen == 64) 3954 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 3955 return Result; 3956 } 3957 3958 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 3959 EVT VT) const { 3960 VT = VT.getScalarType(); 3961 3962 if (!VT.isSimple()) 3963 return false; 3964 3965 switch (VT.getSimpleVT().SimpleTy) { 3966 case MVT::f16: 3967 return Subtarget.hasStdExtZfh(); 3968 case MVT::f32: 3969 return Subtarget.hasStdExtF(); 3970 case MVT::f64: 3971 return Subtarget.hasStdExtD(); 3972 default: 3973 break; 3974 } 3975 3976 return false; 3977 } 3978 3979 Register RISCVTargetLowering::getExceptionPointerRegister( 3980 const Constant *PersonalityFn) const { 3981 return RISCV::X10; 3982 } 3983 3984 Register RISCVTargetLowering::getExceptionSelectorRegister( 3985 const Constant *PersonalityFn) const { 3986 return RISCV::X11; 3987 } 3988 3989 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const { 3990 // Return false to suppress the unnecessary extensions if the LibCall 3991 // arguments or return value is f32 type for LP64 ABI. 3992 RISCVABI::ABI ABI = Subtarget.getTargetABI(); 3993 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32)) 3994 return false; 3995 3996 return true; 3997 } 3998 3999 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT, 4000 SDValue C) const { 4001 // Check integral scalar types. 4002 if (VT.isScalarInteger()) { 4003 // Omit the optimization if the sub target has the M extension and the data 4004 // size exceeds XLen. 
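    // When profitable, the mul is rewritten as shift-plus-add or
    // shift-plus-sub: e.g. mul x, 3 becomes (x << 1) + x, and mul x, 7
    // becomes (x << 3) - x.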
4005 if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen()) 4006 return false; 4007 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) { 4008 // Break the MUL to a SLLI and an ADD/SUB. 4009 const APInt &Imm = ConstNode->getAPIntValue(); 4010 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() || 4011 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2()) 4012 return true; 4013 // Omit the following optimization if the sub target has the M extension 4014 // and the data size >= XLen. 4015 if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen()) 4016 return false; 4017 // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs 4018 // a pair of LUI/ADDI. 4019 if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) { 4020 APInt ImmS = Imm.ashr(Imm.countTrailingZeros()); 4021 if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() || 4022 (1 - ImmS).isPowerOf2()) 4023 return true; 4024 } 4025 } 4026 } 4027 4028 return false; 4029 } 4030 4031 #define GET_REGISTER_MATCHER 4032 #include "RISCVGenAsmMatcher.inc" 4033 4034 Register 4035 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT, 4036 const MachineFunction &MF) const { 4037 Register Reg = MatchRegisterAltName(RegName); 4038 if (Reg == RISCV::NoRegister) 4039 Reg = MatchRegisterName(RegName); 4040 if (Reg == RISCV::NoRegister) 4041 report_fatal_error( 4042 Twine("Invalid register name \"" + StringRef(RegName) + "\".")); 4043 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF); 4044 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg)) 4045 report_fatal_error(Twine("Trying to obtain non-reserved register \"" + 4046 StringRef(RegName) + "\".")); 4047 return Reg; 4048 } 4049 4050 namespace llvm { 4051 namespace RISCVVIntrinsicsTable { 4052 4053 #define GET_RISCVVIntrinsicsTable_IMPL 4054 #include "RISCVGenSearchableTables.inc" 4055 4056 } // namespace RISCVVIntrinsicsTable 4057 } // namespace llvm 4058