//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasStdExtV()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs)
      addRegClassForRVV(VT);

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
        const TargetRegisterClass *RC;
        if (LMul == 1 || VT.getVectorElementType() == MVT::i1)
          RC = &RISCV::VRRegClass;
        else if (LMul == 2)
          RC = &RISCV::VRM2RegClass;
        else if (LMul == 4)
          RC = &RISCV::VRM4RegClass;
        else if (LMul == 8)
          RC = &RISCV::VRM8RegClass;
        else
          llvm_unreachable("Unexpected LMul!");

        addRegisterClass(VT, RC);
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);

    setOperationAction(ISD::UADDO, MVT::i32, Custom);
    setOperationAction(ISD::USUBO, MVT::i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i8, Custom);
    setOperationAction(ISD::UDIV, MVT::i8, Custom);
    setOperationAction(ISD::UREM, MVT::i8, Custom);
    setOperationAction(ISD::SDIV, MVT::i16, Custom);
    setOperationAction(ISD::UDIV, MVT::i16, Custom);
    setOperationAction(ISD::UREM, MVT::i16, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
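    // For example, on RV64 BITREVERSE corresponds to GREVI with shift amount
    // 63 and BSWAP to GREVI with shift amount 56 (rev8); expressing both as
    // GREVI lets later DAG combines merge adjacent bit/byte permutations.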
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       Subtarget.hasStdExtZbb() ? Legal : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
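      // (i64 is not a legal scalar type on RV32, so the scalar operands and
      // results of these nodes cannot be type-legalized in the usual way.)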
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);

      // Mask VTs are custom-expanded into a series of standard nodes
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (MVT VT : IntVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
    }

    // Expand various CCs to best match the RVV ISA, which natively supports
    // UNE but no other unordered comparisons, and supports all ordered
    // comparisons except ONE.
    // Additionally, we expand GT,OGT,GE,OGE for optimization purposes; they
    // are expanded to their swapped-operand CCs (LT,OLT,LE,OLE), and we
    // pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms
    // with fewer patterns.
    ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element
      // type sizes are within one power-of-two of each other. Therefore
      // conversions between vXf16 and vXf64 must be lowered as sequences
      // which convert via vXf32.
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
    };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fixedlen_vector_valuetypes())
          setTruncStoreAction(VT, OtherVT, Expand);

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        // Operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding
        // custom nodes' operands.
        setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
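        // (convertToScalableVector/convertFromScalableVector below perform
        // these casts via INSERT_SUBVECTOR/EXTRACT_SUBVECTOR at index 0.)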
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FCOPYSIGN, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      }
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  // We can use any register for comparisons
  setHasMultipleConditionRegisters();

  if (Subtarget.hasStdExtZbp()) {
    setTargetDAGCombine(ISD::OR);
  }
  if (Subtarget.hasStdExtV())
    setTargetDAGCombine(ISD::FCOPYSIGN);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
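  // Only "reg + simm12" (and plain "simm12") forms are accepted below, e.g.
  // an access like 2040(a0); reg+reg and scaled addressing are rejected.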
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
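  // This lets the compare use the zero register, e.g. (setgt X, -1) becomes
  // (setge X, 0) and can select to "bge X, zero" without materializing -1.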
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see translateSetCCForBranch).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVVLMUL::LMUL_F8;
  case 16:
    return RISCVVLMUL::LMUL_F4;
  case 32:
    return RISCVVLMUL::LMUL_F2;
  case 64:
    return RISCVVLMUL::LMUL_1;
  case 128:
    return RISCVVLMUL::LMUL_2;
  case 256:
    return RISCVVLMUL::LMUL_4;
  case 512:
    return RISCVVLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVVLMUL::LMUL_F8:
  case RISCVVLMUL::LMUL_F4:
  case RISCVVLMUL::LMUL_F2:
  case RISCVVLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVVLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVVLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVVLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVVLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
      LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVVLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVVLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices.
// Returns the subregister index that can perform the subvector
// insert/extract with the given element index, as well as the index
// corresponding to any leftover subvectors that must be further
// inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

// Return the largest legal scalable vector type that matches VT's element
// type.
MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

  unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
  assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1: {
    // Masks are calculated assuming 8-bit elements since that's when we need
    // the most elements.
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
    return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
  }
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
    return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
  }
  }
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
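// For example, a fixed-length v4i32 value is placed at element index 0 of an
// undef scalable container (e.g. nxv2i32 for LMUL=1) via INSERT_SUBVECTOR,
// leaving the tail elements undefined.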
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
  return {Mask, VL};
}

// As above but assuming the given type is a scalable vector type.
static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {
  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
}

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either are (currently) supported. This can get us into an infinite loop
// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
// as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  return false;
}

bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  // Only splats are currently supported.
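  // Splat shuffles are lowered to a single vrgather.vx in lowerVECTOR_SHUFFLE
  // below; other shuffle masks are not yet handled.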
  if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
    return true;

  return false;
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT =
      RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    return SDValue();
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match an index sequence, which we can lower directly to the vid
  // instruction. An all-undef vector is matched by getSplatValue, above.
  if (VT.isInteger()) {
    bool IsVID = true;
    for (unsigned i = 0, e = Op.getNumOperands(); i < e && IsVID; i++)
      IsVID &= Op.getOperand(i).isUndef() ||
               (isa<ConstantSDNode>(Op.getOperand(i)) &&
                Op.getConstantOperandVal(i) == i);

    if (IsVID) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      return convertFromScalableVector(VT, VID, DAG, Subtarget);
    }
  }

  return SDValue();
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  if (SVN->isSplat()) {
    int Lane = SVN->getSplatIndex();
    if (Lane >= 0) {
      MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
          DAG, VT, Subtarget);

      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!");

      SDValue Mask, VL;
      std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
      MVT XLenVT = Subtarget.getXLenVT();
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
      return convertFromScalableVector(VT, Gather, DAG, Subtarget);
    }
  }

  return SDValue();
}

static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
                                     SDLoc DL, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {
  if (VT.isScalableVector())
    return DAG.getFPExtendOrRound(Op, DL, VT);
  assert(VT.isFixedLengthVector() &&
         "Unexpected value type for RVV FP extend/round lowering");
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
  unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
                        ? RISCVISD::FP_EXTEND_VL
                        : RISCVISD::FP_ROUND_VL;
  return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return lowerBRCOND(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    SDValue Op0 = Op.getOperand(0);
    // We can handle fixed length vector bitcasts with a simple replacement
    // in isel.
    if (Op.getValueType().isFixedLengthVector()) {
      if (Op0.getValueType().isFixedLengthVector())
        return Op;
      return SDValue();
    }
    assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
            Subtarget.hasStdExtZfh()) &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::i16)
        return SDValue();
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::i32)
        return SDValue();
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
    return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    MVT VT = Op.getSimpleValueType();
    assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
    SDLoc DL(Op);
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
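    // e.g. on RV64 the shift amount is ANDed with 63, which guarantees the
    // high bit of the 7-bit FSL/FSR shift operand is zero.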
    unsigned ShAmtWidth = Subtarget.getXLen() - 1;
    SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
                                DAG.getConstant(ShAmtWidth, DL, VT));
    unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
    return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
  }
  case ISD::TRUNCATE: {
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    // Only custom-lower vector truncates
    if (!VT.isVector())
      return Op;

    // Truncates to mask types are handled differently
    if (VT.getVectorElementType() == MVT::i1)
      return lowerVectorMaskTrunc(Op, DAG);

    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
    // truncate by one power of two at a time.
    MVT DstEltVT = VT.getVectorElementType();

    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();

    assert(DstEltVT.bitsLT(SrcEltVT) &&
           isPowerOf2_64(DstEltVT.getSizeInBits()) &&
           isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
           "Unexpected vector truncate lowering");

    MVT ContainerVT = SrcVT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(SrcVT);
      Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
    }

    SDValue Result = Src;
    SDValue Mask, VL;
    std::tie(Mask, VL) =
        getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
    LLVMContext &Context = *DAG.getContext();
    const ElementCount Count = ContainerVT.getVectorElementCount();
    do {
      SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
      EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
      Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
                           Mask, VL);
    } while (SrcEltVT != DstEltVT);

    if (SrcVT.isFixedLengthVector())
      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);

    return Result;
  }
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
  case ISD::SIGN_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
  case ISD::SPLAT_VECTOR_PARTS:
    return lowerSPLAT_VECTOR_PARTS(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VSCALE: {
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64 bit known
    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
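    // That is, vscale = VLEN / 64 = VLENB / 8, computed below as a right
    // shift by 3.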
    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
  }
  case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
    // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
    // via f32.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(VT);
      MVT SrcContainerVT =
          ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
        SrcVT.getVectorElementType() != MVT::f16) {
      // For scalable vectors, we only need to close the gap between
      // vXf16->vXf64.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_EXTEND to a custom "VL"
      // version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    MVT InterVT = VT.changeVectorElementType(MVT::f32);
    MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
    SDValue IntermediateExtend = getRVVFPExtendOrRound(
        Src, InterVT, InterContainerVT, DL, DAG, Subtarget);

    SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
                                           DL, DAG, Subtarget);
    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Extend, DAG, Subtarget);
    return Extend;
  }
  case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
    // custom-lower f64->f16 rounds via RVV's round-to-odd float
    // conversion instruction.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (VT.isFixedLengthVector()) {
      MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
      ContainerVT =
          SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
        SrcVT.getVectorElementType() != MVT::f64) {
      // For scalable vectors, we only need to close the gap between
      // vXf64<->vXf16.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    SDValue Mask, VL;
    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

    MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
    SDValue IntermediateRound =
        DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
    SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
                                          DL, DAG, Subtarget);

    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Round, DAG, Subtarget);
    return Round;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that do two hops into
    // sequences.
    MVT VT = Op.getSimpleValueType();
    if (!VT.isVector())
      return Op;
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    MVT EltVT = VT.getVectorElementType();
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();
    unsigned EltSize = EltVT.getSizeInBits();
    unsigned SrcEltSize = SrcEltVT.getSizeInBits();
    assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
           "Unexpected vector element types");

    bool IsInt2FP = SrcEltVT.isInteger();
    // Widening conversions
    if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
      if (IsInt2FP) {
        // Do a regular integer sign/zero extension then convert to float.
        MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
                                      VT.getVectorElementCount());
        unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
                                 ? ISD::ZERO_EXTEND
                                 : ISD::SIGN_EXTEND;
        SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
        return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
      }
      // FP2Int
      assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
      // Do one doubling fp_extend then complete the operation by converting
      // to int.
      MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
      SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
      return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
    }

    // Narrowing conversions
    if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
      if (IsInt2FP) {
        // One narrowing int_to_fp, then an fp_round.
        assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
        MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
        SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
        return DAG.getFPExtendOrRound(Int2FP, DL, VT);
      }
      // FP2Int
      // One narrowing fp_to_int, then truncate the integer. If the float isn't
      // representable by the integer, the result is poison.
      MVT IVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
                           VT.getVectorElementCount());
      SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
    }

    // Scalable vectors can exit here; patterns will handle equally-sized and
    // halving/doubling conversions.
    if (!VT.isFixedLengthVector())
      return Op;

    // For fixed-length vectors we lower to a custom "VL" node.
1514 unsigned RVVOpc = 0; 1515 switch (Op.getOpcode()) { 1516 default: 1517 llvm_unreachable("Impossible opcode"); 1518 case ISD::FP_TO_SINT: 1519 RVVOpc = RISCVISD::FP_TO_SINT_VL; 1520 break; 1521 case ISD::FP_TO_UINT: 1522 RVVOpc = RISCVISD::FP_TO_UINT_VL; 1523 break; 1524 case ISD::SINT_TO_FP: 1525 RVVOpc = RISCVISD::SINT_TO_FP_VL; 1526 break; 1527 case ISD::UINT_TO_FP: 1528 RVVOpc = RISCVISD::UINT_TO_FP_VL; 1529 break; 1530 } 1531 1532 MVT ContainerVT, SrcContainerVT; 1533 // Derive the reference container type from the larger vector type. 1534 if (SrcEltSize > EltSize) { 1535 SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 1536 ContainerVT = 1537 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 1538 } else { 1539 ContainerVT = getContainerForFixedLengthVector(VT); 1540 SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); 1541 } 1542 1543 SDValue Mask, VL; 1544 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1545 1546 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 1547 Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); 1548 return convertFromScalableVector(VT, Src, DAG, Subtarget); 1549 } 1550 case ISD::VECREDUCE_ADD: 1551 case ISD::VECREDUCE_UMAX: 1552 case ISD::VECREDUCE_SMAX: 1553 case ISD::VECREDUCE_UMIN: 1554 case ISD::VECREDUCE_SMIN: 1555 case ISD::VECREDUCE_AND: 1556 case ISD::VECREDUCE_OR: 1557 case ISD::VECREDUCE_XOR: 1558 return lowerVECREDUCE(Op, DAG); 1559 case ISD::VECREDUCE_FADD: 1560 case ISD::VECREDUCE_SEQ_FADD: 1561 return lowerFPVECREDUCE(Op, DAG); 1562 case ISD::INSERT_SUBVECTOR: 1563 return lowerINSERT_SUBVECTOR(Op, DAG); 1564 case ISD::EXTRACT_SUBVECTOR: 1565 return lowerEXTRACT_SUBVECTOR(Op, DAG); 1566 case ISD::VECTOR_REVERSE: 1567 return lowerVECTOR_REVERSE(Op, DAG); 1568 case ISD::BUILD_VECTOR: 1569 return lowerBUILD_VECTOR(Op, DAG, Subtarget); 1570 case ISD::VECTOR_SHUFFLE: 1571 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 1572 case ISD::CONCAT_VECTORS: { 1573 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is 1574 // better than going through the stack, as the default expansion does. 
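  // Sketch with assumed operands (for illustration only): lowering
  //   v8i32 = concat_vectors v4i32:a, v4i32:b
  // via the loop below produces roughly
  //   t0 = insert_subvector undef:v8i32, a, 0
  //   t1 = insert_subvector t0, b, 4
  // so each operand lands at index i * NumOpElts without the default
  // store/reload through the stack.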
1575 SDLoc DL(Op); 1576 MVT VT = Op.getSimpleValueType(); 1577 assert(VT.isFixedLengthVector() && "Unexpected CONCAT_VECTORS lowering"); 1578 unsigned NumOpElts = 1579 Op.getOperand(0).getSimpleValueType().getVectorNumElements(); 1580 SDValue Vec = DAG.getUNDEF(VT); 1581 for (const auto &OpIdx : enumerate(Op->ops())) 1582 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), 1583 DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); 1584 return Vec; 1585 } 1586 case ISD::LOAD: 1587 return lowerFixedLengthVectorLoadToRVV(Op, DAG); 1588 case ISD::STORE: 1589 return lowerFixedLengthVectorStoreToRVV(Op, DAG); 1590 case ISD::SETCC: 1591 return lowerFixedLengthVectorSetccToRVV(Op, DAG); 1592 case ISD::ADD: 1593 return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); 1594 case ISD::SUB: 1595 return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); 1596 case ISD::MUL: 1597 return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); 1598 case ISD::MULHS: 1599 return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); 1600 case ISD::MULHU: 1601 return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); 1602 case ISD::AND: 1603 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, 1604 RISCVISD::AND_VL); 1605 case ISD::OR: 1606 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, 1607 RISCVISD::OR_VL); 1608 case ISD::XOR: 1609 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, 1610 RISCVISD::XOR_VL); 1611 case ISD::SDIV: 1612 return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); 1613 case ISD::SREM: 1614 return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); 1615 case ISD::UDIV: 1616 return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); 1617 case ISD::UREM: 1618 return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); 1619 case ISD::SHL: 1620 return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL); 1621 case ISD::SRA: 1622 return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL); 1623 case ISD::SRL: 1624 return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL); 1625 case ISD::FADD: 1626 return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); 1627 case ISD::FSUB: 1628 return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); 1629 case ISD::FMUL: 1630 return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); 1631 case ISD::FDIV: 1632 return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); 1633 case ISD::FNEG: 1634 return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); 1635 case ISD::FABS: 1636 return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); 1637 case ISD::FSQRT: 1638 return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); 1639 case ISD::FMA: 1640 return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); 1641 case ISD::SMIN: 1642 return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); 1643 case ISD::SMAX: 1644 return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); 1645 case ISD::UMIN: 1646 return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); 1647 case ISD::UMAX: 1648 return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); 1649 case ISD::ABS: 1650 return lowerABS(Op, DAG); 1651 case ISD::VSELECT: 1652 return lowerFixedLengthVectorSelectToRVV(Op, DAG); 1653 case ISD::FCOPYSIGN: 1654 return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG); 1655 } 1656 } 1657 1658 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, 1659 SelectionDAG &DAG, unsigned Flags) { 1660 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags); 1661 } 1662 1663 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, 1664 SelectionDAG &DAG, unsigned Flags) { 1665 return 
DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), 1666 Flags); 1667 } 1668 1669 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, 1670 SelectionDAG &DAG, unsigned Flags) { 1671 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), 1672 N->getOffset(), Flags); 1673 } 1674 1675 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, 1676 SelectionDAG &DAG, unsigned Flags) { 1677 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); 1678 } 1679 1680 template <class NodeTy> 1681 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, 1682 bool IsLocal) const { 1683 SDLoc DL(N); 1684 EVT Ty = getPointerTy(DAG.getDataLayout()); 1685 1686 if (isPositionIndependent()) { 1687 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 1688 if (IsLocal) 1689 // Use PC-relative addressing to access the symbol. This generates the 1690 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) 1691 // %pcrel_lo(auipc)). 1692 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 1693 1694 // Use PC-relative addressing to access the GOT for this symbol, then load 1695 // the address from the GOT. This generates the pattern (PseudoLA sym), 1696 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). 1697 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); 1698 } 1699 1700 switch (getTargetMachine().getCodeModel()) { 1701 default: 1702 report_fatal_error("Unsupported code model for lowering"); 1703 case CodeModel::Small: { 1704 // Generate a sequence for accessing addresses within the first 2 GiB of 1705 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). 1706 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); 1707 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); 1708 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 1709 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); 1710 } 1711 case CodeModel::Medium: { 1712 // Generate a sequence for accessing addresses within any 2GiB range within 1713 // the address space. This generates the pattern (PseudoLLA sym), which 1714 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). 1715 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 1716 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 1717 } 1718 } 1719 } 1720 1721 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, 1722 SelectionDAG &DAG) const { 1723 SDLoc DL(Op); 1724 EVT Ty = Op.getValueType(); 1725 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 1726 int64_t Offset = N->getOffset(); 1727 MVT XLenVT = Subtarget.getXLenVT(); 1728 1729 const GlobalValue *GV = N->getGlobal(); 1730 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 1731 SDValue Addr = getAddr(N, DAG, IsLocal); 1732 1733 // In order to maximise the opportunity for common subexpression elimination, 1734 // emit a separate ADD node for the global address offset instead of folding 1735 // it in the global address node. Later peephole optimisations may choose to 1736 // fold it back in when profitable. 
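  // For illustration (small code model, hypothetical symbol "sym" with a
  // constant offset of 8), the final code is roughly:
  //   lui  a0, %hi(sym)
  //   addi a0, a0, %lo(sym)
  //   addi a0, a0, 8        ; the separate ADD emitted below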
1737 if (Offset != 0) 1738 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 1739 DAG.getConstant(Offset, DL, XLenVT)); 1740 return Addr; 1741 } 1742 1743 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, 1744 SelectionDAG &DAG) const { 1745 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); 1746 1747 return getAddr(N, DAG); 1748 } 1749 1750 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, 1751 SelectionDAG &DAG) const { 1752 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); 1753 1754 return getAddr(N, DAG); 1755 } 1756 1757 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, 1758 SelectionDAG &DAG) const { 1759 JumpTableSDNode *N = cast<JumpTableSDNode>(Op); 1760 1761 return getAddr(N, DAG); 1762 } 1763 1764 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, 1765 SelectionDAG &DAG, 1766 bool UseGOT) const { 1767 SDLoc DL(N); 1768 EVT Ty = getPointerTy(DAG.getDataLayout()); 1769 const GlobalValue *GV = N->getGlobal(); 1770 MVT XLenVT = Subtarget.getXLenVT(); 1771 1772 if (UseGOT) { 1773 // Use PC-relative addressing to access the GOT for this TLS symbol, then 1774 // load the address from the GOT and add the thread pointer. This generates 1775 // the pattern (PseudoLA_TLS_IE sym), which expands to 1776 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). 1777 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 1778 SDValue Load = 1779 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); 1780 1781 // Add the thread pointer. 1782 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 1783 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); 1784 } 1785 1786 // Generate a sequence for accessing the address relative to the thread 1787 // pointer, with the appropriate adjustment for the thread pointer offset. 1788 // This generates the pattern 1789 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) 1790 SDValue AddrHi = 1791 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); 1792 SDValue AddrAdd = 1793 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); 1794 SDValue AddrLo = 1795 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); 1796 1797 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 1798 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 1799 SDValue MNAdd = SDValue( 1800 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), 1801 0); 1802 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); 1803 } 1804 1805 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, 1806 SelectionDAG &DAG) const { 1807 SDLoc DL(N); 1808 EVT Ty = getPointerTy(DAG.getDataLayout()); 1809 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); 1810 const GlobalValue *GV = N->getGlobal(); 1811 1812 // Use a PC-relative addressing mode to access the global dynamic GOT address. 1813 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to 1814 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). 1815 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 1816 SDValue Load = 1817 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); 1818 1819 // Prepare argument list to generate call. 1820 ArgListTy Args; 1821 ArgListEntry Entry; 1822 Entry.Node = Load; 1823 Entry.Ty = CallTy; 1824 Args.push_back(Entry); 1825 1826 // Setup call to __tls_get_addr. 
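  // Approximate sequence for the general/local-dynamic models (illustrative
  // only; the exact relocations come from the pseudo expansion):
  //   la.tls.gd a0, sym            ; PseudoLA_TLS_GD from above
  //   call      __tls_get_addr@plt ; built by the call lowering below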
1827 TargetLowering::CallLoweringInfo CLI(DAG); 1828 CLI.setDebugLoc(DL) 1829 .setChain(DAG.getEntryNode()) 1830 .setLibCallee(CallingConv::C, CallTy, 1831 DAG.getExternalSymbol("__tls_get_addr", Ty), 1832 std::move(Args)); 1833 1834 return LowerCallTo(CLI).first; 1835 } 1836 1837 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op, 1838 SelectionDAG &DAG) const { 1839 SDLoc DL(Op); 1840 EVT Ty = Op.getValueType(); 1841 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 1842 int64_t Offset = N->getOffset(); 1843 MVT XLenVT = Subtarget.getXLenVT(); 1844 1845 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal()); 1846 1847 if (DAG.getMachineFunction().getFunction().getCallingConv() == 1848 CallingConv::GHC) 1849 report_fatal_error("In GHC calling convention TLS is not supported"); 1850 1851 SDValue Addr; 1852 switch (Model) { 1853 case TLSModel::LocalExec: 1854 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false); 1855 break; 1856 case TLSModel::InitialExec: 1857 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true); 1858 break; 1859 case TLSModel::LocalDynamic: 1860 case TLSModel::GeneralDynamic: 1861 Addr = getDynamicTLSAddr(N, DAG); 1862 break; 1863 } 1864 1865 // In order to maximise the opportunity for common subexpression elimination, 1866 // emit a separate ADD node for the global address offset instead of folding 1867 // it in the global address node. Later peephole optimisations may choose to 1868 // fold it back in when profitable. 1869 if (Offset != 0) 1870 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 1871 DAG.getConstant(Offset, DL, XLenVT)); 1872 return Addr; 1873 } 1874 1875 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { 1876 SDValue CondV = Op.getOperand(0); 1877 SDValue TrueV = Op.getOperand(1); 1878 SDValue FalseV = Op.getOperand(2); 1879 SDLoc DL(Op); 1880 MVT XLenVT = Subtarget.getXLenVT(); 1881 1882 // If the result type is XLenVT and CondV is the output of a SETCC node 1883 // which also operated on XLenVT inputs, then merge the SETCC node into the 1884 // lowered RISCVISD::SELECT_CC to take advantage of the integer 1885 // compare+branch instructions. 
i.e.: 1886 // (select (setcc lhs, rhs, cc), truev, falsev) 1887 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev) 1888 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC && 1889 CondV.getOperand(0).getSimpleValueType() == XLenVT) { 1890 SDValue LHS = CondV.getOperand(0); 1891 SDValue RHS = CondV.getOperand(1); 1892 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2)); 1893 ISD::CondCode CCVal = CC->get(); 1894 1895 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 1896 1897 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT); 1898 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; 1899 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 1900 } 1901 1902 // Otherwise: 1903 // (select condv, truev, falsev) 1904 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) 1905 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 1906 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT); 1907 1908 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; 1909 1910 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 1911 } 1912 1913 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 1914 SDValue CondV = Op.getOperand(1); 1915 SDLoc DL(Op); 1916 MVT XLenVT = Subtarget.getXLenVT(); 1917 1918 if (CondV.getOpcode() == ISD::SETCC && 1919 CondV.getOperand(0).getValueType() == XLenVT) { 1920 SDValue LHS = CondV.getOperand(0); 1921 SDValue RHS = CondV.getOperand(1); 1922 ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get(); 1923 1924 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 1925 1926 SDValue TargetCC = DAG.getCondCode(CCVal); 1927 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 1928 LHS, RHS, TargetCC, Op.getOperand(2)); 1929 } 1930 1931 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 1932 CondV, DAG.getConstant(0, DL, XLenVT), 1933 DAG.getCondCode(ISD::SETNE), Op.getOperand(2)); 1934 } 1935 1936 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { 1937 MachineFunction &MF = DAG.getMachineFunction(); 1938 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); 1939 1940 SDLoc DL(Op); 1941 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1942 getPointerTy(MF.getDataLayout())); 1943 1944 // vastart just stores the address of the VarArgsFrameIndex slot into the 1945 // memory location argument. 
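  // e.g. (sketch, RV64): va_start(ap) becomes a store of the address of the
  // vararg save area into the va_list object, roughly "sd <FI-addr>, 0(ap)".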
1946 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1947 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1), 1948 MachinePointerInfo(SV)); 1949 } 1950 1951 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, 1952 SelectionDAG &DAG) const { 1953 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 1954 MachineFunction &MF = DAG.getMachineFunction(); 1955 MachineFrameInfo &MFI = MF.getFrameInfo(); 1956 MFI.setFrameAddressIsTaken(true); 1957 Register FrameReg = RI.getFrameRegister(MF); 1958 int XLenInBytes = Subtarget.getXLen() / 8; 1959 1960 EVT VT = Op.getValueType(); 1961 SDLoc DL(Op); 1962 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); 1963 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1964 while (Depth--) { 1965 int Offset = -(XLenInBytes * 2); 1966 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, 1967 DAG.getIntPtrConstant(Offset, DL)); 1968 FrameAddr = 1969 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 1970 } 1971 return FrameAddr; 1972 } 1973 1974 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, 1975 SelectionDAG &DAG) const { 1976 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 1977 MachineFunction &MF = DAG.getMachineFunction(); 1978 MachineFrameInfo &MFI = MF.getFrameInfo(); 1979 MFI.setReturnAddressIsTaken(true); 1980 MVT XLenVT = Subtarget.getXLenVT(); 1981 int XLenInBytes = Subtarget.getXLen() / 8; 1982 1983 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 1984 return SDValue(); 1985 1986 EVT VT = Op.getValueType(); 1987 SDLoc DL(Op); 1988 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1989 if (Depth) { 1990 int Off = -XLenInBytes; 1991 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); 1992 SDValue Offset = DAG.getConstant(Off, DL, VT); 1993 return DAG.getLoad(VT, DL, DAG.getEntryNode(), 1994 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), 1995 MachinePointerInfo()); 1996 } 1997 1998 // Return the value of the return address register, marking it an implicit 1999 // live-in. 
2000 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT)); 2001 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); 2002 } 2003 2004 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op, 2005 SelectionDAG &DAG) const { 2006 SDLoc DL(Op); 2007 SDValue Lo = Op.getOperand(0); 2008 SDValue Hi = Op.getOperand(1); 2009 SDValue Shamt = Op.getOperand(2); 2010 EVT VT = Lo.getValueType(); 2011 2012 // if Shamt-XLEN < 0: // Shamt < XLEN 2013 // Lo = Lo << Shamt 2014 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt)) 2015 // else: 2016 // Lo = 0 2017 // Hi = Lo << (Shamt-XLEN) 2018 2019 SDValue Zero = DAG.getConstant(0, DL, VT); 2020 SDValue One = DAG.getConstant(1, DL, VT); 2021 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 2022 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 2023 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 2024 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 2025 2026 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); 2027 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One); 2028 SDValue ShiftRightLo = 2029 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt); 2030 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt); 2031 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo); 2032 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen); 2033 2034 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 2035 2036 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero); 2037 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 2038 2039 SDValue Parts[2] = {Lo, Hi}; 2040 return DAG.getMergeValues(Parts, DL); 2041 } 2042 2043 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, 2044 bool IsSRA) const { 2045 SDLoc DL(Op); 2046 SDValue Lo = Op.getOperand(0); 2047 SDValue Hi = Op.getOperand(1); 2048 SDValue Shamt = Op.getOperand(2); 2049 EVT VT = Lo.getValueType(); 2050 2051 // SRA expansion: 2052 // if Shamt-XLEN < 0: // Shamt < XLEN 2053 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 2054 // Hi = Hi >>s Shamt 2055 // else: 2056 // Lo = Hi >>s (Shamt-XLEN); 2057 // Hi = Hi >>s (XLEN-1) 2058 // 2059 // SRL expansion: 2060 // if Shamt-XLEN < 0: // Shamt < XLEN 2061 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 2062 // Hi = Hi >>u Shamt 2063 // else: 2064 // Lo = Hi >>u (Shamt-XLEN); 2065 // Hi = 0; 2066 2067 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; 2068 2069 SDValue Zero = DAG.getConstant(0, DL, VT); 2070 SDValue One = DAG.getConstant(1, DL, VT); 2071 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 2072 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 2073 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 2074 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 2075 2076 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt); 2077 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One); 2078 SDValue ShiftLeftHi = 2079 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt); 2080 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi); 2081 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); 2082 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen); 2083 SDValue HiFalse = 2084 IsSRA ? 
DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero; 2085 2086 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 2087 2088 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse); 2089 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 2090 2091 SDValue Parts[2] = {Lo, Hi}; 2092 return DAG.getMergeValues(Parts, DL); 2093 } 2094 2095 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is 2096 // illegal (currently only vXi64 RV32). 2097 // FIXME: We could also catch non-constant sign-extended i32 values and lower 2098 // them to SPLAT_VECTOR_I64 2099 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op, 2100 SelectionDAG &DAG) const { 2101 SDLoc DL(Op); 2102 EVT VecVT = Op.getValueType(); 2103 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && 2104 "Unexpected SPLAT_VECTOR_PARTS lowering"); 2105 2106 assert(Op.getNumOperands() == 2 && "Unexpected number of operands!"); 2107 SDValue Lo = Op.getOperand(0); 2108 SDValue Hi = Op.getOperand(1); 2109 2110 if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { 2111 int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); 2112 int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); 2113 // If Hi constant is all the same sign bit as Lo, lower this as a custom 2114 // node in order to try and match RVV vector/scalar instructions. 2115 if ((LoC >> 31) == HiC) 2116 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 2117 } 2118 2119 // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not 2120 // to accidentally sign-extend the 32-bit halves to the e64 SEW: 2121 // vmv.v.x vX, hi 2122 // vsll.vx vX, vX, /*32*/ 2123 // vmv.v.x vY, lo 2124 // vsll.vx vY, vY, /*32*/ 2125 // vsrl.vx vY, vY, /*32*/ 2126 // vor.vv vX, vX, vY 2127 SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT); 2128 2129 Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 2130 Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV); 2131 Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV); 2132 2133 if (isNullConstant(Hi)) 2134 return Lo; 2135 2136 Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi); 2137 Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV); 2138 2139 return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi); 2140 } 2141 2142 // Custom-lower extensions from mask vectors by using a vselect either with 1 2143 // for zero/any-extension or -1 for sign-extension: 2144 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0) 2145 // Note that any-extension is lowered identically to zero-extension. 2146 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG, 2147 int64_t ExtTrueVal) const { 2148 SDLoc DL(Op); 2149 MVT VecVT = Op.getSimpleValueType(); 2150 SDValue Src = Op.getOperand(0); 2151 // Only custom-lower extensions from mask types 2152 assert(Src.getValueType().isVector() && 2153 Src.getValueType().getVectorElementType() == MVT::i1); 2154 2155 MVT XLenVT = Subtarget.getXLenVT(); 2156 SDValue SplatZero = DAG.getConstant(0, DL, XLenVT); 2157 SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT); 2158 2159 if (VecVT.isScalableVector()) { 2160 // Be careful not to introduce illegal scalar types at this stage, and be 2161 // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is 2162 // illegal and must be expanded. Since we know that the constants are 2163 // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly. 
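  // Sketch (illustrative): for nxv1i64 = sext nxv1i1 %m on RV32, a plain
  // SPLAT_VECTOR of the i64 constants 0 and -1 would itself be illegal, so
  // SPLAT_VECTOR_I64 of the 32-bit values is used instead; the result is
  // still (vselect %m, splat(-1), splat(0)) as described above.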
2164 bool IsRV32E64 = 2165 !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64; 2166 2167 if (!IsRV32E64) { 2168 SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); 2169 SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); 2170 } else { 2171 SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero); 2172 SplatTrueVal = 2173 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal); 2174 } 2175 2176 return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); 2177 } 2178 2179 MVT ContainerVT = getContainerForFixedLengthVector(VecVT); 2180 MVT I1ContainerVT = 2181 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2182 2183 SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget); 2184 2185 SDValue Mask, VL; 2186 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2187 2188 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); 2189 SplatTrueVal = 2190 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); 2191 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, 2192 SplatTrueVal, SplatZero, VL); 2193 2194 return convertFromScalableVector(VecVT, Select, DAG, Subtarget); 2195 } 2196 2197 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV( 2198 SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const { 2199 MVT ExtVT = Op.getSimpleValueType(); 2200 // Only custom-lower extensions from fixed-length vector types. 2201 if (!ExtVT.isFixedLengthVector()) 2202 return Op; 2203 MVT VT = Op.getOperand(0).getSimpleValueType(); 2204 // Grab the canonical container type for the extended type. Infer the smaller 2205 // type from that to ensure the same number of vector elements, as we know 2206 // the LMUL will be sufficient to hold the smaller type. 2207 MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT); 2208 // Get the extended container type manually to ensure the same number of 2209 // vector elements between source and dest. 2210 MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 2211 ContainerExtVT.getVectorElementCount()); 2212 2213 SDValue Op1 = 2214 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 2215 2216 SDLoc DL(Op); 2217 SDValue Mask, VL; 2218 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2219 2220 SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL); 2221 2222 return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget); 2223 } 2224 2225 // Custom-lower truncations from vectors to mask vectors by using a mask and a 2226 // setcc operation: 2227 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne) 2228 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op, 2229 SelectionDAG &DAG) const { 2230 SDLoc DL(Op); 2231 EVT MaskVT = Op.getValueType(); 2232 // Only expect to custom-lower truncations to mask types 2233 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 && 2234 "Unexpected type for vector mask lowering"); 2235 SDValue Src = Op.getOperand(0); 2236 MVT VecVT = Src.getSimpleValueType(); 2237 2238 // If this is a fixed vector, we need to convert it to a scalable vector. 
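  // Rough sketch of the fixed-length path below (assuming a v8i8 source):
  //   vand.vi  v9, v8, 1    ; RISCVISD::AND_VL with a splat of 1
  //   vmsne.vi v0, v9, 0    ; RISCVISD::SETCC_VL, SETNE against a splat of 0
  // The scalable path uses the generic AND/SETCC nodes instead.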
2239 MVT ContainerVT = VecVT; 2240 if (VecVT.isFixedLengthVector()) { 2241 ContainerVT = getContainerForFixedLengthVector(VecVT); 2242 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 2243 } 2244 2245 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); 2246 SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 2247 2248 SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); 2249 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); 2250 2251 if (VecVT.isScalableVector()) { 2252 SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); 2253 return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE); 2254 } 2255 2256 SDValue Mask, VL; 2257 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2258 2259 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); 2260 SDValue Trunc = 2261 DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL); 2262 Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero, 2263 DAG.getCondCode(ISD::SETNE), Mask, VL); 2264 return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget); 2265 } 2266 2267 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the 2268 // first position of a vector, and that vector is slid up to the insert index. 2269 // By limiting the active vector length to index+1 and merging with the 2270 // original vector (with an undisturbed tail policy for elements >= VL), we 2271 // achieve the desired result of leaving all elements untouched except the one 2272 // at VL-1, which is replaced with the desired value. 2273 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 2274 SelectionDAG &DAG) const { 2275 SDLoc DL(Op); 2276 MVT VecVT = Op.getSimpleValueType(); 2277 SDValue Vec = Op.getOperand(0); 2278 SDValue Val = Op.getOperand(1); 2279 SDValue Idx = Op.getOperand(2); 2280 2281 MVT ContainerVT = VecVT; 2282 // If the operand is a fixed-length vector, convert to a scalable one. 2283 if (VecVT.isFixedLengthVector()) { 2284 ContainerVT = getContainerForFixedLengthVector(VecVT); 2285 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2286 } 2287 2288 MVT XLenVT = Subtarget.getXLenVT(); 2289 2290 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 2291 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64; 2292 // Even i64-element vectors on RV32 can be lowered without scalar 2293 // legalization if the most-significant 32 bits of the value are not affected 2294 // by the sign-extension of the lower 32 bits. 2295 // TODO: We could also catch sign extensions of a 32-bit value. 2296 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) { 2297 const auto *CVal = cast<ConstantSDNode>(Val); 2298 if (isInt<32>(CVal->getSExtValue())) { 2299 IsLegalInsert = true; 2300 Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); 2301 } 2302 } 2303 2304 SDValue Mask, VL; 2305 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2306 2307 SDValue ValInVec; 2308 2309 if (IsLegalInsert) { 2310 if (isNullConstant(Idx)) 2311 return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL); 2312 ValInVec = DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, 2313 DAG.getUNDEF(ContainerVT), Val, VL); 2314 } else { 2315 // On RV32, i64-element vectors must be specially handled to place the 2316 // value at element 0, by using two vslide1up instructions in sequence on 2317 // the i32 split lo/hi value. 
Use an equivalently-sized i32 vector for 2318 // this. 2319 SDValue One = DAG.getConstant(1, DL, XLenVT); 2320 SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero); 2321 SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One); 2322 MVT I32ContainerVT = 2323 MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2); 2324 SDValue I32Mask = 2325 getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first; 2326 // Limit the active VL to two. 2327 SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT); 2328 // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied 2329 // undef doesn't obey the earlyclobber constraint. Just splat a zero value. 2330 ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, 2331 InsertI64VL); 2332 // First slide in the hi value, then the lo in underneath it. 2333 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 2334 ValHi, I32Mask, InsertI64VL); 2335 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 2336 ValLo, I32Mask, InsertI64VL); 2337 // Bitcast back to the right container type. 2338 ValInVec = DAG.getBitcast(ContainerVT, ValInVec); 2339 } 2340 2341 // Now that the value is in a vector, slide it into position. 2342 SDValue InsertVL = 2343 DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT)); 2344 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 2345 ValInVec, Idx, Mask, InsertVL); 2346 if (!VecVT.isFixedLengthVector()) 2347 return Slideup; 2348 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 2349 } 2350 2351 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then 2352 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer 2353 // types this is done using VMV_X_S to allow us to glean information about the 2354 // sign bits of the result. 2355 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 2356 SelectionDAG &DAG) const { 2357 SDLoc DL(Op); 2358 SDValue Idx = Op.getOperand(1); 2359 SDValue Vec = Op.getOperand(0); 2360 EVT EltVT = Op.getValueType(); 2361 MVT VecVT = Vec.getSimpleValueType(); 2362 MVT XLenVT = Subtarget.getXLenVT(); 2363 2364 if (VecVT.getVectorElementType() == MVT::i1) { 2365 // FIXME: For now we just promote to an i8 vector and extract from that, 2366 // but this is probably not optimal. 2367 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 2368 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 2369 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx); 2370 } 2371 2372 // If this is a fixed vector, we need to convert it to a scalable vector. 2373 MVT ContainerVT = VecVT; 2374 if (VecVT.isFixedLengthVector()) { 2375 ContainerVT = getContainerForFixedLengthVector(VecVT); 2376 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2377 } 2378 2379 // If the index is 0, the vector is already in the right position. 2380 if (!isNullConstant(Idx)) { 2381 // Use a VL of 1 to avoid processing more elements than we need. 2382 SDValue VL = DAG.getConstant(1, DL, XLenVT); 2383 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2384 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2385 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2386 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 2387 } 2388 2389 if (!EltVT.isInteger()) { 2390 // Floating-point extracts are handled in TableGen. 
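  // e.g. (sketch): extractelt <4 x float> %v, 2 is expected to select to
  // roughly "vslidedown.vi v8, v8, 2" (from the slide above) followed by
  // "vfmv.f.s fa0, v8" via those patterns.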
2391 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, 2392 DAG.getConstant(0, DL, XLenVT)); 2393 } 2394 2395 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 2396 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); 2397 } 2398 2399 // Called by type legalization to handle splat of i64 on RV32. 2400 // FIXME: We can optimize this when the type has sign or zero bits in one 2401 // of the halves. 2402 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, 2403 SDValue VL, SelectionDAG &DAG) { 2404 SDValue ThirtyTwoV = DAG.getConstant(32, DL, VT); 2405 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 2406 DAG.getConstant(0, DL, MVT::i32)); 2407 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 2408 DAG.getConstant(1, DL, MVT::i32)); 2409 2410 // vmv.v.x vX, hi 2411 // vsll.vx vX, vX, /*32*/ 2412 // vmv.v.x vY, lo 2413 // vsll.vx vY, vY, /*32*/ 2414 // vsrl.vx vY, vY, /*32*/ 2415 // vor.vv vX, vX, vY 2416 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); 2417 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2418 Lo = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); 2419 Lo = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL); 2420 Lo = DAG.getNode(RISCVISD::SRL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL); 2421 2422 Hi = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Hi, VL); 2423 Hi = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Hi, ThirtyTwoV, Mask, VL); 2424 2425 return DAG.getNode(RISCVISD::OR_VL, DL, VT, Lo, Hi, Mask, VL); 2426 } 2427 2428 // Some RVV intrinsics may claim that they want an integer operand to be 2429 // promoted or expanded. 2430 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG, 2431 const RISCVSubtarget &Subtarget) { 2432 assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2433 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) && 2434 "Unexpected opcode"); 2435 2436 if (!Subtarget.hasStdExtV()) 2437 return SDValue(); 2438 2439 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; 2440 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0); 2441 SDLoc DL(Op); 2442 2443 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = 2444 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo); 2445 if (!II || !II->SplatOperand) 2446 return SDValue(); 2447 2448 unsigned SplatOp = II->SplatOperand + HasChain; 2449 assert(SplatOp < Op.getNumOperands()); 2450 2451 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); 2452 SDValue &ScalarOp = Operands[SplatOp]; 2453 MVT OpVT = ScalarOp.getSimpleValueType(); 2454 MVT VT = Op.getSimpleValueType(); 2455 MVT XLenVT = Subtarget.getXLenVT(); 2456 2457 // If this isn't a scalar, or its type is XLenVT we're done. 2458 if (!OpVT.isScalarInteger() || OpVT == XLenVT) 2459 return SDValue(); 2460 2461 // Simplest case is that the operand needs to be promoted to XLenVT. 2462 if (OpVT.bitsLT(XLenVT)) { 2463 // If the operand is a constant, sign extend to increase our chances 2464 // of being able to use a .vi instruction. ANY_EXTEND would become a 2465 // a zero extend and the simm5 check in isel would fail. 2466 // FIXME: Should we ignore the upper bits in isel instead? 2467 unsigned ExtOpc = 2468 isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND; 2469 ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp); 2470 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); 2471 } 2472 2473 // The more complex case is when the scalar is larger than XLenVT. 
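  // Illustrative example (XLEN=32): an i64 scalar operand equal to -1
  // passes the isInt<32> check below and is simply truncated; a value such
  // as 0x100000000 does not, and is instead materialised as a splat vector
  // by splatSplitI64WithVL before the intrinsic node is re-emitted.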
2474 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 && 2475 VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!"); 2476 2477 // If this is a sign-extended 32-bit constant, we can truncate it and rely 2478 // on the instruction to sign-extend since SEW>XLEN. 2479 if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) { 2480 if (isInt<32>(CVal->getSExtValue())) { 2481 ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); 2482 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); 2483 } 2484 } 2485 2486 // We need to convert the scalar to a splat vector. 2487 // FIXME: Can we implicitly truncate the scalar if it is known to 2488 // be sign extended? 2489 // VL should be the last operand. 2490 SDValue VL = Op.getOperand(Op.getNumOperands() - 1); 2491 assert(VL.getValueType() == XLenVT); 2492 ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG); 2493 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands); 2494 } 2495 2496 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2497 SelectionDAG &DAG) const { 2498 unsigned IntNo = Op.getConstantOperandVal(0); 2499 SDLoc DL(Op); 2500 MVT XLenVT = Subtarget.getXLenVT(); 2501 2502 switch (IntNo) { 2503 default: 2504 break; // Don't custom lower most intrinsics. 2505 case Intrinsic::thread_pointer: { 2506 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2507 return DAG.getRegister(RISCV::X4, PtrVT); 2508 } 2509 case Intrinsic::riscv_vmv_x_s: 2510 assert(Op.getValueType() == XLenVT && "Unexpected VT!"); 2511 return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(), 2512 Op.getOperand(1)); 2513 case Intrinsic::riscv_vmv_v_x: { 2514 SDValue Scalar = Op.getOperand(1); 2515 if (Scalar.getValueType().bitsLE(XLenVT)) { 2516 unsigned ExtOpc = 2517 isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND; 2518 Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar); 2519 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), Scalar, 2520 Op.getOperand(2)); 2521 } 2522 2523 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!"); 2524 2525 // If this is a sign-extended 32-bit constant, we can truncate it and rely 2526 // on the instruction to sign-extend since SEW>XLEN. 2527 if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) { 2528 if (isInt<32>(CVal->getSExtValue())) 2529 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), 2530 DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32), 2531 Op.getOperand(2)); 2532 } 2533 2534 // Otherwise use the more complicated splatting algorithm. 2535 return splatSplitI64WithVL(DL, Op.getSimpleValueType(), Scalar, 2536 Op.getOperand(2), DAG); 2537 } 2538 case Intrinsic::riscv_vfmv_v_f: 2539 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(), 2540 Op.getOperand(1), Op.getOperand(2)); 2541 case Intrinsic::riscv_vmv_s_x: { 2542 SDValue Scalar = Op.getOperand(2); 2543 2544 if (Scalar.getValueType().bitsLE(XLenVT)) { 2545 Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar); 2546 return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, Op.getValueType(), 2547 Op.getOperand(1), Scalar, Op.getOperand(3)); 2548 } 2549 2550 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!"); 2551 2552 // This is an i64 value that lives in two scalar registers. We have to 2553 // insert this in a convoluted way. First we build vXi64 splat containing 2554 // the/ two values that we assemble using some bit math. Next we'll use 2555 // vid.v and vmseq to build a mask with bit 0 set. 
Then we'll use that mask 2556 // to merge element 0 from our splat into the source vector. 2557 // FIXME: This is probably not the best way to do this, but it is 2558 // consistent with INSERT_VECTOR_ELT lowering so it is a good starting 2559 // point. 2560 // vmv.v.x vX, hi 2561 // vsll.vx vX, vX, /*32*/ 2562 // vmv.v.x vY, lo 2563 // vsll.vx vY, vY, /*32*/ 2564 // vsrl.vx vY, vY, /*32*/ 2565 // vor.vv vX, vX, vY 2566 // 2567 // vid.v vVid 2568 // vmseq.vx mMask, vVid, 0 2569 // vmerge.vvm vDest, vSrc, vVal, mMask 2570 MVT VT = Op.getSimpleValueType(); 2571 SDValue Vec = Op.getOperand(1); 2572 SDValue VL = Op.getOperand(3); 2573 2574 SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); 2575 SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, 2576 DAG.getConstant(0, DL, MVT::i32), VL); 2577 2578 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); 2579 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2580 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL); 2581 SDValue SelectCond = 2582 DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx, 2583 DAG.getCondCode(ISD::SETEQ), Mask, VL); 2584 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal, 2585 Vec, VL); 2586 } 2587 } 2588 2589 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 2590 } 2591 2592 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 2593 SelectionDAG &DAG) const { 2594 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 2595 } 2596 2597 static MVT getLMUL1VT(MVT VT) { 2598 assert(VT.getVectorElementType().getSizeInBits() <= 64 && 2599 "Unexpected vector MVT"); 2600 return MVT::getScalableVectorVT( 2601 VT.getVectorElementType(), 2602 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); 2603 } 2604 2605 static unsigned getRVVReductionOp(unsigned ISDOpcode) { 2606 switch (ISDOpcode) { 2607 default: 2608 llvm_unreachable("Unhandled reduction"); 2609 case ISD::VECREDUCE_ADD: 2610 return RISCVISD::VECREDUCE_ADD_VL; 2611 case ISD::VECREDUCE_UMAX: 2612 return RISCVISD::VECREDUCE_UMAX_VL; 2613 case ISD::VECREDUCE_SMAX: 2614 return RISCVISD::VECREDUCE_SMAX_VL; 2615 case ISD::VECREDUCE_UMIN: 2616 return RISCVISD::VECREDUCE_UMIN_VL; 2617 case ISD::VECREDUCE_SMIN: 2618 return RISCVISD::VECREDUCE_SMIN_VL; 2619 case ISD::VECREDUCE_AND: 2620 return RISCVISD::VECREDUCE_AND_VL; 2621 case ISD::VECREDUCE_OR: 2622 return RISCVISD::VECREDUCE_OR_VL; 2623 case ISD::VECREDUCE_XOR: 2624 return RISCVISD::VECREDUCE_XOR_VL; 2625 } 2626 } 2627 2628 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, 2629 SelectionDAG &DAG) const { 2630 SDLoc DL(Op); 2631 SDValue Vec = Op.getOperand(0); 2632 EVT VecEVT = Vec.getValueType(); 2633 2634 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode()); 2635 2636 // Due to ordering in legalize types we may have a vector type that needs to 2637 // be split. Do that manually so we can get down to a legal type. 2638 while (getTypeAction(*DAG.getContext(), VecEVT) == 2639 TargetLowering::TypeSplitVector) { 2640 SDValue Lo, Hi; 2641 std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL); 2642 VecEVT = Lo.getValueType(); 2643 Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi); 2644 } 2645 2646 // TODO: The type may need to be widened rather than split. Or widened before 2647 // it can be split. 
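  // Sketch (illustrative, assuming a v4i32 vecreduce.add on RV64):
  //   vmv.v.i    v9, 0        ; LMUL=1 splat of the neutral element
  //   vredsum.vs v9, v8, v9   ; RISCVISD::VECREDUCE_ADD_VL
  //   vmv.x.s    a0, v9       ; extract element 0 of the result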
2648 if (!isTypeLegal(VecEVT)) 2649 return SDValue(); 2650 2651 MVT VecVT = VecEVT.getSimpleVT(); 2652 MVT VecEltVT = VecVT.getVectorElementType(); 2653 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode()); 2654 2655 MVT ContainerVT = VecVT; 2656 if (VecVT.isFixedLengthVector()) { 2657 ContainerVT = getContainerForFixedLengthVector(VecVT); 2658 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2659 } 2660 2661 MVT M1VT = getLMUL1VT(ContainerVT); 2662 2663 SDValue Mask, VL; 2664 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2665 2666 // FIXME: This is a VLMAX splat which might be too large and can prevent 2667 // vsetvli removal. 2668 SDValue NeutralElem = 2669 DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags()); 2670 SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem); 2671 SDValue Reduction = 2672 DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL); 2673 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 2674 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 2675 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 2676 } 2677 2678 // Given a reduction op, this function returns the matching reduction opcode, 2679 // the vector SDValue and the scalar SDValue required to lower this to a 2680 // RISCVISD node. 2681 static std::tuple<unsigned, SDValue, SDValue> 2682 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { 2683 SDLoc DL(Op); 2684 switch (Op.getOpcode()) { 2685 default: 2686 llvm_unreachable("Unhandled reduction"); 2687 case ISD::VECREDUCE_FADD: 2688 return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), 2689 DAG.getConstantFP(0.0, DL, EltVT)); 2690 case ISD::VECREDUCE_SEQ_FADD: 2691 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1), 2692 Op.getOperand(0)); 2693 } 2694 } 2695 2696 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, 2697 SelectionDAG &DAG) const { 2698 SDLoc DL(Op); 2699 MVT VecEltVT = Op.getSimpleValueType(); 2700 2701 unsigned RVVOpcode; 2702 SDValue VectorVal, ScalarVal; 2703 std::tie(RVVOpcode, VectorVal, ScalarVal) = 2704 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); 2705 MVT VecVT = VectorVal.getSimpleValueType(); 2706 2707 MVT ContainerVT = VecVT; 2708 if (VecVT.isFixedLengthVector()) { 2709 ContainerVT = getContainerForFixedLengthVector(VecVT); 2710 VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget); 2711 } 2712 2713 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); 2714 2715 SDValue Mask, VL; 2716 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2717 2718 // FIXME: This is a VLMAX splat which might be too large and can prevent 2719 // vsetvli removal. 
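  // Roughly (illustrative, assuming nxv2f32): the scalar operand (the
  // neutral 0.0 for VECREDUCE_FADD, the start value for the ordered
  // VECREDUCE_SEQ_FADD) is splat into an LMUL=1 register and the reduction
  // is expected to select to vfredsum.vs / vfredosum.vs plus a vfmv.f.s.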
2720 SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
2721 SDValue Reduction =
2722 DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
2723 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
2724 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
2725 }
2726
2727 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
2728 SelectionDAG &DAG) const {
2729 SDValue Vec = Op.getOperand(0);
2730 SDValue SubVec = Op.getOperand(1);
2731 MVT VecVT = Vec.getSimpleValueType();
2732 MVT SubVecVT = SubVec.getSimpleValueType();
2733
2734 SDLoc DL(Op);
2735 MVT XLenVT = Subtarget.getXLenVT();
2736 unsigned OrigIdx = Op.getConstantOperandVal(2);
2737 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2738
2739 // We don't have the ability to slide mask vectors up indexed by their i1
2740 // elements; the smallest we can do is i8. Often we are able to bitcast to
2741 // equivalent i8 vectors. Note that when inserting a fixed-length vector
2742 // into a scalable one, we might not necessarily have enough scalable
2743 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
2744 if (SubVecVT.getVectorElementType() == MVT::i1 &&
2745 (OrigIdx != 0 || !Vec.isUndef())) {
2746 if (VecVT.getVectorMinNumElements() >= 8 &&
2747 SubVecVT.getVectorMinNumElements() >= 8) {
2748 assert(OrigIdx % 8 == 0 && "Invalid index");
2749 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
2750 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
2751 "Unexpected mask vector lowering");
2752 OrigIdx /= 8;
2753 SubVecVT =
2754 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
2755 SubVecVT.isScalableVector());
2756 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
2757 VecVT.isScalableVector());
2758 Vec = DAG.getBitcast(VecVT, Vec);
2759 SubVec = DAG.getBitcast(SubVecVT, SubVec);
2760 } else {
2761 // We can't slide this mask vector up indexed by its i1 elements.
2762 // This poses a problem when we wish to insert a scalable vector which
2763 // can't be re-expressed as a larger type. Just choose the slow path and
2764 // extend to a larger type, then truncate back down.
2765 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
2766 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
2767 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
2768 SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
2769 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
2770 Op.getOperand(2));
2771 SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
2772 return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
2773 }
2774 }
2775
2776 // If the subvector is a fixed-length type, we cannot use subregister
2777 // manipulation to simplify the codegen; we don't know which register of an
2778 // LMUL group contains the specific subvector as we only know the minimum
2779 // register size. Therefore we must slide the vector group up the full
2780 // amount.
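  // Illustrative example (assumed types): inserting v2i32 %sub into
  // nxv2i32 %vec at index 2 first places %sub at element 0 of a scalable
  // register, then with VL = 4 (index plus subvector length) performs
  // roughly "vslideup.vi v8, v9, 2", leaving elements 0 and 1 of %vec
  // undisturbed.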
2781 if (SubVecVT.isFixedLengthVector()) { 2782 if (OrigIdx == 0 && Vec.isUndef()) 2783 return Op; 2784 MVT ContainerVT = VecVT; 2785 if (VecVT.isFixedLengthVector()) { 2786 ContainerVT = getContainerForFixedLengthVector(VecVT); 2787 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2788 } 2789 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, 2790 DAG.getUNDEF(ContainerVT), SubVec, 2791 DAG.getConstant(0, DL, XLenVT)); 2792 SDValue Mask = 2793 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 2794 // Set the vector length to only the number of elements we care about. Note 2795 // that for slideup this includes the offset. 2796 SDValue VL = 2797 DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); 2798 SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 2799 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 2800 SubVec, SlideupAmt, Mask, VL); 2801 if (!VecVT.isFixedLengthVector()) 2802 return Slideup; 2803 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 2804 } 2805 2806 unsigned SubRegIdx, RemIdx; 2807 std::tie(SubRegIdx, RemIdx) = 2808 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 2809 VecVT, SubVecVT, OrigIdx, TRI); 2810 2811 RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); 2812 bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 || 2813 SubVecLMUL == RISCVVLMUL::LMUL_F4 || 2814 SubVecLMUL == RISCVVLMUL::LMUL_F8; 2815 2816 // 1. If the Idx has been completely eliminated and this subvector's size is 2817 // a vector register or a multiple thereof, or the surrounding elements are 2818 // undef, then this is a subvector insert which naturally aligns to a vector 2819 // register. These can easily be handled using subregister manipulation. 2820 // 2. If the subvector is smaller than a vector register, then the insertion 2821 // must preserve the undisturbed elements of the register. We do this by 2822 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type 2823 // (which resolves to a subregister copy), performing a VSLIDEUP to place the 2824 // subvector within the vector register, and an INSERT_SUBVECTOR of that 2825 // LMUL=1 type back into the larger vector (resolving to another subregister 2826 // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type 2827 // to avoid allocating a large register group to hold our subvector. 2828 if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) 2829 return Op; 2830 2831 // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements 2832 // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy 2833 // (in our case undisturbed). This means we can set up a subvector insertion 2834 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the 2835 // size of the subvector. 2836 MVT InterSubVT = VecVT; 2837 SDValue AlignedExtract = Vec; 2838 unsigned AlignedIdx = OrigIdx - RemIdx; 2839 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 2840 InterSubVT = getLMUL1VT(VecVT); 2841 // Extract a subvector equal to the nearest full vector register type. This 2842 // should resolve to a EXTRACT_SUBREG instruction. 2843 AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 2844 DAG.getConstant(AlignedIdx, DL, XLenVT)); 2845 } 2846 2847 SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); 2848 // For scalable vectors this must be further multiplied by vscale. 
2849 SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); 2850 2851 SDValue Mask, VL; 2852 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 2853 2854 // Construct the vector length corresponding to RemIdx + length(SubVecVT). 2855 VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); 2856 VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); 2857 VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); 2858 2859 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, 2860 DAG.getUNDEF(InterSubVT), SubVec, 2861 DAG.getConstant(0, DL, XLenVT)); 2862 2863 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, 2864 AlignedExtract, SubVec, SlideupAmt, Mask, VL); 2865 2866 // If required, insert this subvector back into the correct vector register. 2867 // This should resolve to an INSERT_SUBREG instruction. 2868 if (VecVT.bitsGT(InterSubVT)) 2869 Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, 2870 DAG.getConstant(AlignedIdx, DL, XLenVT)); 2871 2872 // We might have bitcast from a mask type: cast back to the original type if 2873 // required. 2874 return DAG.getBitcast(Op.getSimpleValueType(), Slideup); 2875 } 2876 2877 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, 2878 SelectionDAG &DAG) const { 2879 SDValue Vec = Op.getOperand(0); 2880 MVT SubVecVT = Op.getSimpleValueType(); 2881 MVT VecVT = Vec.getSimpleValueType(); 2882 2883 SDLoc DL(Op); 2884 MVT XLenVT = Subtarget.getXLenVT(); 2885 unsigned OrigIdx = Op.getConstantOperandVal(1); 2886 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2887 2888 // We don't have the ability to slide mask vectors down indexed by their i1 2889 // elements; the smallest we can do is i8. Often we are able to bitcast to 2890 // equivalent i8 vectors. Note that when extracting a fixed-length vector 2891 // from a scalable one, we might not necessarily have enough scalable 2892 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. 2893 if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { 2894 if (VecVT.getVectorMinNumElements() >= 8 && 2895 SubVecVT.getVectorMinNumElements() >= 8) { 2896 assert(OrigIdx % 8 == 0 && "Invalid index"); 2897 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 2898 SubVecVT.getVectorMinNumElements() % 8 == 0 && 2899 "Unexpected mask vector lowering"); 2900 OrigIdx /= 8; 2901 SubVecVT = 2902 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 2903 SubVecVT.isScalableVector()); 2904 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 2905 VecVT.isScalableVector()); 2906 Vec = DAG.getBitcast(VecVT, Vec); 2907 } else { 2908 // We can't slide this mask vector down, indexed by its i1 elements. 2909 // This poses a problem when we wish to extract a scalable vector which 2910 // can't be re-expressed as a larger type. Just choose the slow path and 2911 // extend to a larger type, then truncate back down. 2912 // TODO: We could probably improve this when extracting certain fixed 2913 // from fixed, where we can extract as i8 and shift the correct element 2914 // right to reach the desired subvector? 
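  // Sketch (illustrative): extracting nxv1i1 from nxv4i1 at index 2 cannot
  // be re-expressed over i8 elements, so it takes this slow path:
  //   %wide = zero_extend %vec to nxv4i8
  //   %part = extract_subvector %wide, 2   ; nxv1i8
  //   %res  = setcc ne %part, splat(0)     ; back to nxv1i1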
2915 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); 2916 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); 2917 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); 2918 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec, 2919 Op.getOperand(1)); 2920 SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT); 2921 return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE); 2922 } 2923 } 2924 2925 // If the subvector vector is a fixed-length type, we cannot use subregister 2926 // manipulation to simplify the codegen; we don't know which register of a 2927 // LMUL group contains the specific subvector as we only know the minimum 2928 // register size. Therefore we must slide the vector group down the full 2929 // amount. 2930 if (SubVecVT.isFixedLengthVector()) { 2931 // With an index of 0 this is a cast-like subvector, which can be performed 2932 // with subregister operations. 2933 if (OrigIdx == 0) 2934 return Op; 2935 MVT ContainerVT = VecVT; 2936 if (VecVT.isFixedLengthVector()) { 2937 ContainerVT = getContainerForFixedLengthVector(VecVT); 2938 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2939 } 2940 SDValue Mask = 2941 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 2942 // Set the vector length to only the number of elements we care about. This 2943 // avoids sliding down elements we're going to discard straight away. 2944 SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT); 2945 SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 2946 SDValue Slidedown = 2947 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2948 DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL); 2949 // Now we can use a cast-like subvector extract to get the result. 2950 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, 2951 DAG.getConstant(0, DL, XLenVT)); 2952 } 2953 2954 unsigned SubRegIdx, RemIdx; 2955 std::tie(SubRegIdx, RemIdx) = 2956 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 2957 VecVT, SubVecVT, OrigIdx, TRI); 2958 2959 // If the Idx has been completely eliminated then this is a subvector extract 2960 // which naturally aligns to a vector register. These can easily be handled 2961 // using subregister manipulation. 2962 if (RemIdx == 0) 2963 return Op; 2964 2965 // Else we must shift our vector register directly to extract the subvector. 2966 // Do this using VSLIDEDOWN. 2967 2968 // If the vector type is an LMUL-group type, extract a subvector equal to the 2969 // nearest full vector register type. This should resolve to a EXTRACT_SUBREG 2970 // instruction. 2971 MVT InterSubVT = VecVT; 2972 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 2973 InterSubVT = getLMUL1VT(VecVT); 2974 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 2975 DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT)); 2976 } 2977 2978 // Slide this vector register down by the desired number of elements in order 2979 // to place the desired subvector starting at element 0. 2980 SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT); 2981 // For scalable vectors this must be further multiplied by vscale. 
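  // Worked example (illustrative, added for clarity): extracting nxv1i32 from
  // nxv4i32 at index 3. The nearest LMUL=1 type is nxv2i32 and RemIdx = 1, so
  // we extract the second nxv2i32 register of the group (index 2 above),
  // slide it down by vscale * 1 elements, and take the nxv1i32 subvector at
  // index 0 of the result.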
2982 SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt); 2983 2984 SDValue Mask, VL; 2985 std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget); 2986 SDValue Slidedown = 2987 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT, 2988 DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL); 2989 2990 // Now the vector is in the right position, extract our final subvector. This 2991 // should resolve to a COPY. 2992 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, 2993 DAG.getConstant(0, DL, XLenVT)); 2994 2995 // We might have bitcast from a mask type: cast back to the original type if 2996 // required. 2997 return DAG.getBitcast(Op.getSimpleValueType(), Slidedown); 2998 } 2999 3000 // Implement vector_reverse using vrgather.vv with indices determined by 3001 // subtracting the id of each element from (VLMAX-1). This will convert 3002 // the indices like so: 3003 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0). 3004 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16. 3005 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op, 3006 SelectionDAG &DAG) const { 3007 SDLoc DL(Op); 3008 MVT VecVT = Op.getSimpleValueType(); 3009 unsigned EltSize = VecVT.getScalarSizeInBits(); 3010 unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue(); 3011 3012 unsigned MaxVLMAX = 0; 3013 unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits(); 3014 if (VectorBitsMax != 0) 3015 MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock; 3016 3017 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL; 3018 MVT IntVT = VecVT.changeVectorElementTypeToInteger(); 3019 3020 // If this is SEW=8 and VLMAX is unknown or more than 256, we need 3021 // to use vrgatherei16.vv. 3022 // TODO: It's also possible to use vrgatherei16.vv for other types to 3023 // decrease register width for the index calculation. 3024 if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) { 3025 // If this is LMUL=8, we have to split before can use vrgatherei16.vv. 3026 // Reverse each half, then reassemble them in reverse order. 3027 // NOTE: It's also possible that after splitting that VLMAX no longer 3028 // requires vrgatherei16.vv. 3029 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) { 3030 SDValue Lo, Hi; 3031 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 3032 EVT LoVT, HiVT; 3033 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); 3034 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo); 3035 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi); 3036 // Reassemble the low and high pieces reversed. 3037 // FIXME: This is a CONCAT_VECTORS. 3038 SDValue Res = 3039 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi, 3040 DAG.getIntPtrConstant(0, DL)); 3041 return DAG.getNode( 3042 ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo, 3043 DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL)); 3044 } 3045 3046 // Just promote the int type to i16 which will double the LMUL. 3047 IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount()); 3048 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL; 3049 } 3050 3051 MVT XLenVT = Subtarget.getXLenVT(); 3052 SDValue Mask, VL; 3053 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 3054 3055 // Calculate VLMAX-1 for the desired SEW. 
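  // For example (illustrative, assuming VLEN = 128): for nxv2i32 we have
  // vscale = 2, so VLMAX = 4. VID produces (0, 1, 2, 3), subtracting from
  // VLMAX-1 gives the indices (3, 2, 1, 0), and the gather below uses them to
  // reverse the input.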
3056 unsigned MinElts = VecVT.getVectorMinNumElements(); 3057 SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT, 3058 DAG.getConstant(MinElts, DL, XLenVT)); 3059 SDValue VLMinus1 = 3060 DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT)); 3061 3062 // Splat VLMAX-1 taking care to handle SEW==64 on RV32. 3063 bool IsRV32E64 = 3064 !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64; 3065 SDValue SplatVL; 3066 if (!IsRV32E64) 3067 SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1); 3068 else 3069 SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1); 3070 3071 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL); 3072 SDValue Indices = 3073 DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL); 3074 3075 return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL); 3076 } 3077 3078 SDValue 3079 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, 3080 SelectionDAG &DAG) const { 3081 auto *Load = cast<LoadSDNode>(Op); 3082 3083 SDLoc DL(Op); 3084 MVT VT = Op.getSimpleValueType(); 3085 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3086 3087 SDValue VL = 3088 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3089 3090 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 3091 SDValue NewLoad = DAG.getMemIntrinsicNode( 3092 RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, 3093 Load->getMemoryVT(), Load->getMemOperand()); 3094 3095 SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); 3096 return DAG.getMergeValues({Result, Load->getChain()}, DL); 3097 } 3098 3099 SDValue 3100 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, 3101 SelectionDAG &DAG) const { 3102 auto *Store = cast<StoreSDNode>(Op); 3103 3104 SDLoc DL(Op); 3105 MVT VT = Store->getValue().getSimpleValueType(); 3106 3107 // FIXME: We probably need to zero any extra bits in a byte for mask stores. 3108 // This is tricky to do. 
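  // As with the load lowering above, the store is emitted as a VSE_VL on the
  // scalable container type with VL set to the fixed-length element count, so
  // only the original elements are written. For example (illustrative), a
  // v4i32 store always uses VL = 4 regardless of the implemented VLEN.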
3109 3110 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3111 3112 SDValue VL = 3113 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3114 3115 SDValue NewValue = 3116 convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget); 3117 return DAG.getMemIntrinsicNode( 3118 RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), 3119 {Store->getChain(), NewValue, Store->getBasePtr(), VL}, 3120 Store->getMemoryVT(), Store->getMemOperand()); 3121 } 3122 3123 SDValue 3124 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, 3125 SelectionDAG &DAG) const { 3126 MVT InVT = Op.getOperand(0).getSimpleValueType(); 3127 MVT ContainerVT = getContainerForFixedLengthVector(InVT); 3128 3129 MVT VT = Op.getSimpleValueType(); 3130 3131 SDValue Op1 = 3132 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 3133 SDValue Op2 = 3134 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 3135 3136 SDLoc DL(Op); 3137 SDValue VL = 3138 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3139 3140 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 3141 3142 bool Invert = false; 3143 Optional<unsigned> LogicOpc; 3144 if (ContainerVT.isFloatingPoint()) { 3145 bool Swap = false; 3146 switch (CC) { 3147 default: 3148 break; 3149 case ISD::SETULE: 3150 case ISD::SETULT: 3151 Swap = true; 3152 LLVM_FALLTHROUGH; 3153 case ISD::SETUGE: 3154 case ISD::SETUGT: 3155 CC = getSetCCInverse(CC, ContainerVT); 3156 Invert = true; 3157 break; 3158 case ISD::SETOGE: 3159 case ISD::SETOGT: 3160 case ISD::SETGE: 3161 case ISD::SETGT: 3162 Swap = true; 3163 break; 3164 case ISD::SETUEQ: 3165 // Use !((OLT Op1, Op2) || (OLT Op2, Op1)) 3166 Invert = true; 3167 LogicOpc = RISCVISD::VMOR_VL; 3168 CC = ISD::SETOLT; 3169 break; 3170 case ISD::SETONE: 3171 // Use ((OLT Op1, Op2) || (OLT Op2, Op1)) 3172 LogicOpc = RISCVISD::VMOR_VL; 3173 CC = ISD::SETOLT; 3174 break; 3175 case ISD::SETO: 3176 // Use (OEQ Op1, Op1) && (OEQ Op2, Op2) 3177 LogicOpc = RISCVISD::VMAND_VL; 3178 CC = ISD::SETOEQ; 3179 break; 3180 case ISD::SETUO: 3181 // Use (UNE Op1, Op1) || (UNE Op2, Op2) 3182 LogicOpc = RISCVISD::VMOR_VL; 3183 CC = ISD::SETUNE; 3184 break; 3185 } 3186 3187 if (Swap) { 3188 CC = getSetCCSwappedOperands(CC); 3189 std::swap(Op1, Op2); 3190 } 3191 } 3192 3193 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3194 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3195 3196 // There are 3 cases we need to emit. 3197 // 1. For (OEQ Op1, Op1) && (OEQ Op2, Op2) or (UNE Op1, Op1) || (UNE Op2, Op2) 3198 // we need to compare each operand with itself. 3199 // 2. For (OLT Op1, Op2) || (OLT Op2, Op1) we need to compare Op1 and Op2 in 3200 // both orders. 3201 // 3. For any other case we just need one compare with Op1 and Op2. 
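  // For example (illustrative): a SETUEQ compare of two v4f32 operands was
  // rewritten above to SETOLT with Invert set, so we emit (SETOLT Op1, Op2)
  // and (SETOLT Op2, Op1), combine them with VMOR, and finally invert the
  // result by XORing with an all-ones mask.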
3202 SDValue Cmp; 3203 if (LogicOpc && (CC == ISD::SETOEQ || CC == ISD::SETUNE)) { 3204 Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op1, 3205 DAG.getCondCode(CC), Mask, VL); 3206 SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op2, 3207 DAG.getCondCode(CC), Mask, VL); 3208 Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); 3209 } else { 3210 Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, 3211 DAG.getCondCode(CC), Mask, VL); 3212 if (LogicOpc) { 3213 SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op1, 3214 DAG.getCondCode(CC), Mask, VL); 3215 Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); 3216 } 3217 } 3218 3219 if (Invert) { 3220 SDValue AllOnes = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3221 Cmp = DAG.getNode(RISCVISD::VMXOR_VL, DL, MaskVT, Cmp, AllOnes, VL); 3222 } 3223 3224 return convertFromScalableVector(VT, Cmp, DAG, Subtarget); 3225 } 3226 3227 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( 3228 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { 3229 MVT VT = Op.getSimpleValueType(); 3230 3231 if (VT.getVectorElementType() == MVT::i1) 3232 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); 3233 3234 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); 3235 } 3236 3237 // Lower vector ABS to smax(X, sub(0, X)). 3238 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const { 3239 SDLoc DL(Op); 3240 MVT VT = Op.getSimpleValueType(); 3241 SDValue X = Op.getOperand(0); 3242 3243 assert(VT.isFixedLengthVector() && "Unexpected type"); 3244 3245 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3246 X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); 3247 3248 SDValue Mask, VL; 3249 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3250 3251 SDValue SplatZero = 3252 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 3253 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 3254 SDValue NegX = 3255 DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL); 3256 SDValue Max = 3257 DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL); 3258 3259 return convertFromScalableVector(VT, Max, DAG, Subtarget); 3260 } 3261 3262 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV( 3263 SDValue Op, SelectionDAG &DAG) const { 3264 SDLoc DL(Op); 3265 MVT VT = Op.getSimpleValueType(); 3266 SDValue Mag = Op.getOperand(0); 3267 SDValue Sign = Op.getOperand(1); 3268 assert(Mag.getValueType() == Sign.getValueType() && 3269 "Can only handle COPYSIGN with matching types."); 3270 3271 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3272 Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget); 3273 Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget); 3274 3275 SDValue Mask, VL; 3276 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3277 3278 SDValue CopySign = 3279 DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL); 3280 3281 return convertFromScalableVector(VT, CopySign, DAG, Subtarget); 3282 } 3283 3284 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( 3285 SDValue Op, SelectionDAG &DAG) const { 3286 MVT VT = Op.getSimpleValueType(); 3287 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3288 3289 MVT I1ContainerVT = 3290 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3291 3292 SDValue CC = 3293 convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); 
3294 SDValue Op1 = 3295 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 3296 SDValue Op2 = 3297 convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); 3298 3299 SDLoc DL(Op); 3300 SDValue Mask, VL; 3301 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3302 3303 SDValue Select = 3304 DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); 3305 3306 return convertFromScalableVector(VT, Select, DAG, Subtarget); 3307 } 3308 3309 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, 3310 unsigned NewOpc, 3311 bool HasMask) const { 3312 MVT VT = Op.getSimpleValueType(); 3313 assert(useRVVForFixedLengthVectorVT(VT) && 3314 "Only expected to lower fixed length vector operation!"); 3315 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3316 3317 // Create list of operands by converting existing ones to scalable types. 3318 SmallVector<SDValue, 6> Ops; 3319 for (const SDValue &V : Op->op_values()) { 3320 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 3321 3322 // Pass through non-vector operands. 3323 if (!V.getValueType().isVector()) { 3324 Ops.push_back(V); 3325 continue; 3326 } 3327 3328 // "cast" fixed length vector to a scalable vector. 3329 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && 3330 "Only fixed length vectors are supported!"); 3331 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 3332 } 3333 3334 SDLoc DL(Op); 3335 SDValue Mask, VL; 3336 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3337 if (HasMask) 3338 Ops.push_back(Mask); 3339 Ops.push_back(VL); 3340 3341 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); 3342 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); 3343 } 3344 3345 // Returns the opcode of the target-specific SDNode that implements the 32-bit 3346 // form of the given Opcode. 3347 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { 3348 switch (Opcode) { 3349 default: 3350 llvm_unreachable("Unexpected opcode"); 3351 case ISD::SHL: 3352 return RISCVISD::SLLW; 3353 case ISD::SRA: 3354 return RISCVISD::SRAW; 3355 case ISD::SRL: 3356 return RISCVISD::SRLW; 3357 case ISD::SDIV: 3358 return RISCVISD::DIVW; 3359 case ISD::UDIV: 3360 return RISCVISD::DIVUW; 3361 case ISD::UREM: 3362 return RISCVISD::REMUW; 3363 case ISD::ROTL: 3364 return RISCVISD::ROLW; 3365 case ISD::ROTR: 3366 return RISCVISD::RORW; 3367 case RISCVISD::GREVI: 3368 return RISCVISD::GREVIW; 3369 case RISCVISD::GORCI: 3370 return RISCVISD::GORCIW; 3371 } 3372 } 3373 3374 // Converts the given 32-bit operation to a target-specific SelectionDAG node. 3375 // Because i32 isn't a legal type for RV64, these operations would otherwise 3376 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W 3377 // later one because the fact the operation was originally of type i32 is 3378 // lost. 3379 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, 3380 unsigned ExtOpc = ISD::ANY_EXTEND) { 3381 SDLoc DL(N); 3382 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 3383 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); 3384 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); 3385 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 3386 // ReplaceNodeResults requires we maintain the same type for the return value. 
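  // For example (illustrative): an i32 SRL on RV64 arrives here as
  // (i32 (srl x, y)). Both operands are any-extended to i64, an SRLW node is
  // emitted (it only reads the low 32 bits of x and the low 5 bits of y), and
  // the result is truncated back to i32.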
3387 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); 3388 } 3389 3390 // Converts the given 32-bit operation to a i64 operation with signed extension 3391 // semantic to reduce the signed extension instructions. 3392 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { 3393 SDLoc DL(N); 3394 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3395 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 3396 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); 3397 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 3398 DAG.getValueType(MVT::i32)); 3399 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 3400 } 3401 3402 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 3403 SmallVectorImpl<SDValue> &Results, 3404 SelectionDAG &DAG) const { 3405 SDLoc DL(N); 3406 switch (N->getOpcode()) { 3407 default: 3408 llvm_unreachable("Don't know how to custom type legalize this operation!"); 3409 case ISD::STRICT_FP_TO_SINT: 3410 case ISD::STRICT_FP_TO_UINT: 3411 case ISD::FP_TO_SINT: 3412 case ISD::FP_TO_UINT: { 3413 bool IsStrict = N->isStrictFPOpcode(); 3414 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3415 "Unexpected custom legalisation"); 3416 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); 3417 // If the FP type needs to be softened, emit a library call using the 'si' 3418 // version. If we left it to default legalization we'd end up with 'di'. If 3419 // the FP type doesn't need to be softened just let generic type 3420 // legalization promote the result type. 3421 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != 3422 TargetLowering::TypeSoftenFloat) 3423 return; 3424 RTLIB::Libcall LC; 3425 if (N->getOpcode() == ISD::FP_TO_SINT || 3426 N->getOpcode() == ISD::STRICT_FP_TO_SINT) 3427 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); 3428 else 3429 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); 3430 MakeLibCallOptions CallOptions; 3431 EVT OpVT = Op0.getValueType(); 3432 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); 3433 SDValue Chain = IsStrict ? 
N->getOperand(0) : SDValue(); 3434 SDValue Result; 3435 std::tie(Result, Chain) = 3436 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); 3437 Results.push_back(Result); 3438 if (IsStrict) 3439 Results.push_back(Chain); 3440 break; 3441 } 3442 case ISD::READCYCLECOUNTER: { 3443 assert(!Subtarget.is64Bit() && 3444 "READCYCLECOUNTER only has custom type legalization on riscv32"); 3445 3446 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 3447 SDValue RCW = 3448 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); 3449 3450 Results.push_back( 3451 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); 3452 Results.push_back(RCW.getValue(2)); 3453 break; 3454 } 3455 case ISD::ADD: 3456 case ISD::SUB: 3457 case ISD::MUL: 3458 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3459 "Unexpected custom legalisation"); 3460 if (N->getOperand(1).getOpcode() == ISD::Constant) 3461 return; 3462 Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); 3463 break; 3464 case ISD::SHL: 3465 case ISD::SRA: 3466 case ISD::SRL: 3467 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3468 "Unexpected custom legalisation"); 3469 if (N->getOperand(1).getOpcode() == ISD::Constant) 3470 return; 3471 Results.push_back(customLegalizeToWOp(N, DAG)); 3472 break; 3473 case ISD::ROTL: 3474 case ISD::ROTR: 3475 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3476 "Unexpected custom legalisation"); 3477 Results.push_back(customLegalizeToWOp(N, DAG)); 3478 break; 3479 case ISD::SDIV: 3480 case ISD::UDIV: 3481 case ISD::UREM: { 3482 MVT VT = N->getSimpleValueType(0); 3483 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 3484 Subtarget.is64Bit() && Subtarget.hasStdExtM() && 3485 "Unexpected custom legalisation"); 3486 if (N->getOperand(0).getOpcode() == ISD::Constant || 3487 N->getOperand(1).getOpcode() == ISD::Constant) 3488 return; 3489 3490 // If the input is i32, use ANY_EXTEND since the W instructions don't read 3491 // the upper 32 bits. For other types we need to sign or zero extend 3492 // based on the opcode. 3493 unsigned ExtOpc = ISD::ANY_EXTEND; 3494 if (VT != MVT::i32) 3495 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND 3496 : ISD::ZERO_EXTEND; 3497 3498 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); 3499 break; 3500 } 3501 case ISD::UADDO: 3502 case ISD::USUBO: { 3503 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3504 "Unexpected custom legalisation"); 3505 bool IsAdd = N->getOpcode() == ISD::UADDO; 3506 SDLoc DL(N); 3507 // Create an ADDW or SUBW. 3508 SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3509 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 3510 SDValue Res = 3511 DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS); 3512 Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res, 3513 DAG.getValueType(MVT::i32)); 3514 3515 // Sign extend the LHS and perform an unsigned compare with the ADDW result. 3516 // Since the inputs are sign extended from i32, this is equivalent to 3517 // comparing the lower 32 bits. 3518 LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 3519 SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS, 3520 IsAdd ? 
ISD::SETULT : ISD::SETUGT); 3521 3522 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 3523 Results.push_back(Overflow); 3524 return; 3525 } 3526 case ISD::UADDSAT: 3527 case ISD::USUBSAT: { 3528 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3529 "Unexpected custom legalisation"); 3530 SDLoc DL(N); 3531 if (Subtarget.hasStdExtZbb()) { 3532 // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using 3533 // sign extend allows overflow of the lower 32 bits to be detected on 3534 // the promoted size. 3535 SDValue LHS = 3536 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 3537 SDValue RHS = 3538 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1)); 3539 SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS); 3540 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 3541 return; 3542 } 3543 3544 // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom 3545 // promotion for UADDO/USUBO. 3546 Results.push_back(expandAddSubSat(N, DAG)); 3547 return; 3548 } 3549 case ISD::BITCAST: { 3550 assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3551 Subtarget.hasStdExtF()) || 3552 (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) && 3553 "Unexpected custom legalisation"); 3554 SDValue Op0 = N->getOperand(0); 3555 if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) { 3556 if (Op0.getValueType() != MVT::f16) 3557 return; 3558 SDValue FPConv = 3559 DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0); 3560 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); 3561 } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3562 Subtarget.hasStdExtF()) { 3563 if (Op0.getValueType() != MVT::f32) 3564 return; 3565 SDValue FPConv = 3566 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); 3567 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); 3568 } 3569 break; 3570 } 3571 case RISCVISD::GREVI: 3572 case RISCVISD::GORCI: { 3573 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3574 "Unexpected custom legalisation"); 3575 // This is similar to customLegalizeToWOp, except that we pass the second 3576 // operand (a TargetConstant) straight through: it is already of type 3577 // XLenVT. 3578 SDLoc DL(N); 3579 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 3580 SDValue NewOp0 = 3581 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3582 SDValue NewRes = 3583 DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1)); 3584 // ReplaceNodeResults requires we maintain the same type for the return 3585 // value. 3586 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 3587 break; 3588 } 3589 case RISCVISD::SHFLI: { 3590 // There is no SHFLIW instruction, but we can just promote the operation. 3591 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3592 "Unexpected custom legalisation"); 3593 SDLoc DL(N); 3594 SDValue NewOp0 = 3595 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3596 SDValue NewRes = 3597 DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1)); 3598 // ReplaceNodeResults requires we maintain the same type for the return 3599 // value. 
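    // Informal justification (added for clarity): the control value produced
    // for an i32 SHFLI is at most 8, and shuffle stages with control bits
    // below 16 only permute bits within each 32-bit half, so the low 32 bits
    // of the promoted i64 result match the original i32 result.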
3600 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 3601 break; 3602 } 3603 case ISD::BSWAP: 3604 case ISD::BITREVERSE: { 3605 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3606 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 3607 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 3608 N->getOperand(0)); 3609 unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24; 3610 SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0, 3611 DAG.getTargetConstant(Imm, DL, 3612 Subtarget.getXLenVT())); 3613 // ReplaceNodeResults requires we maintain the same type for the return 3614 // value. 3615 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW)); 3616 break; 3617 } 3618 case ISD::FSHL: 3619 case ISD::FSHR: { 3620 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3621 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 3622 SDValue NewOp0 = 3623 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3624 SDValue NewOp1 = 3625 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 3626 SDValue NewOp2 = 3627 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 3628 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 3629 // Mask the shift amount to 5 bits. 3630 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 3631 DAG.getConstant(0x1f, DL, MVT::i64)); 3632 unsigned Opc = 3633 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; 3634 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); 3635 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 3636 break; 3637 } 3638 case ISD::EXTRACT_VECTOR_ELT: { 3639 // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element 3640 // type is illegal (currently only vXi64 RV32). 3641 // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are 3642 // transferred to the destination register. We issue two of these from the 3643 // upper- and lower- halves of the SEW-bit vector element, slid down to the 3644 // first element. 3645 SDLoc DL(N); 3646 SDValue Vec = N->getOperand(0); 3647 SDValue Idx = N->getOperand(1); 3648 3649 // The vector type hasn't been legalized yet so we can't issue target 3650 // specific nodes if it needs legalization. 3651 // FIXME: We would manually legalize if it's important. 3652 if (!isTypeLegal(Vec.getValueType())) 3653 return; 3654 3655 MVT VecVT = Vec.getSimpleValueType(); 3656 3657 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && 3658 VecVT.getVectorElementType() == MVT::i64 && 3659 "Unexpected EXTRACT_VECTOR_ELT legalization"); 3660 3661 // If this is a fixed vector, we need to convert it to a scalable vector. 3662 MVT ContainerVT = VecVT; 3663 if (VecVT.isFixedLengthVector()) { 3664 ContainerVT = getContainerForFixedLengthVector(VecVT); 3665 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3666 } 3667 3668 MVT XLenVT = Subtarget.getXLenVT(); 3669 3670 // Use a VL of 1 to avoid processing more elements than we need. 3671 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 3672 SDValue VL = DAG.getConstant(1, DL, XLenVT); 3673 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3674 3675 // Unless the index is known to be 0, we must slide the vector down to get 3676 // the desired element into index 0. 
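    // For example (illustrative): extracting element 2 of an nxv2i64 vector
    // on RV32 slides the vector down by 2 with VL = 1, reads the low 32 bits
    // with VMV_X_S, shifts the element right by 32 and reads again for the
    // high bits, and finally reassembles the i64 result with BUILD_PAIR.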
3677 if (!isNullConstant(Idx)) { 3678 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 3679 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 3680 } 3681 3682 // Extract the lower XLEN bits of the correct vector element. 3683 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 3684 3685 // To extract the upper XLEN bits of the vector element, shift the first 3686 // element right by 32 bits and re-extract the lower XLEN bits. 3687 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 3688 DAG.getConstant(32, DL, XLenVT), VL); 3689 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, 3690 ThirtyTwoV, Mask, VL); 3691 3692 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 3693 3694 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 3695 break; 3696 } 3697 case ISD::INTRINSIC_WO_CHAIN: { 3698 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3699 switch (IntNo) { 3700 default: 3701 llvm_unreachable( 3702 "Don't know how to custom type legalize this intrinsic!"); 3703 case Intrinsic::riscv_vmv_x_s: { 3704 EVT VT = N->getValueType(0); 3705 MVT XLenVT = Subtarget.getXLenVT(); 3706 if (VT.bitsLT(XLenVT)) { 3707 // Simple case just extract using vmv.x.s and truncate. 3708 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, 3709 Subtarget.getXLenVT(), N->getOperand(1)); 3710 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); 3711 return; 3712 } 3713 3714 assert(VT == MVT::i64 && !Subtarget.is64Bit() && 3715 "Unexpected custom legalization"); 3716 3717 // We need to do the move in two steps. 3718 SDValue Vec = N->getOperand(1); 3719 MVT VecVT = Vec.getSimpleValueType(); 3720 3721 // First extract the lower XLEN bits of the element. 3722 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 3723 3724 // To extract the upper XLEN bits of the vector element, shift the first 3725 // element right by 32 bits and re-extract the lower XLEN bits. 3726 SDValue VL = DAG.getConstant(1, DL, XLenVT); 3727 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 3728 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3729 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, 3730 DAG.getConstant(32, DL, XLenVT), VL); 3731 SDValue LShr32 = 3732 DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL); 3733 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 3734 3735 Results.push_back( 3736 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 3737 break; 3738 } 3739 } 3740 break; 3741 } 3742 case ISD::VECREDUCE_ADD: 3743 case ISD::VECREDUCE_AND: 3744 case ISD::VECREDUCE_OR: 3745 case ISD::VECREDUCE_XOR: 3746 case ISD::VECREDUCE_SMAX: 3747 case ISD::VECREDUCE_UMAX: 3748 case ISD::VECREDUCE_SMIN: 3749 case ISD::VECREDUCE_UMIN: 3750 if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG)) 3751 Results.push_back(V); 3752 break; 3753 } 3754 } 3755 3756 // A structure to hold one of the bit-manipulation patterns below. 
Together, a 3757 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: 3758 // (or (and (shl x, 1), 0xAAAAAAAA), 3759 // (and (srl x, 1), 0x55555555)) 3760 struct RISCVBitmanipPat { 3761 SDValue Op; 3762 unsigned ShAmt; 3763 bool IsSHL; 3764 3765 bool formsPairWith(const RISCVBitmanipPat &Other) const { 3766 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; 3767 } 3768 }; 3769 3770 // Matches patterns of the form 3771 // (and (shl x, C2), (C1 << C2)) 3772 // (and (srl x, C2), C1) 3773 // (shl (and x, C1), C2) 3774 // (srl (and x, (C1 << C2)), C2) 3775 // Where C2 is a power of 2 and C1 has at least that many leading zeroes. 3776 // The expected masks for each shift amount are specified in BitmanipMasks where 3777 // BitmanipMasks[log2(C2)] specifies the expected C1 value. 3778 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether 3779 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible 3780 // XLen is 64. 3781 static Optional<RISCVBitmanipPat> 3782 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { 3783 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && 3784 "Unexpected number of masks"); 3785 Optional<uint64_t> Mask; 3786 // Optionally consume a mask around the shift operation. 3787 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) { 3788 Mask = Op.getConstantOperandVal(1); 3789 Op = Op.getOperand(0); 3790 } 3791 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL) 3792 return None; 3793 bool IsSHL = Op.getOpcode() == ISD::SHL; 3794 3795 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3796 return None; 3797 uint64_t ShAmt = Op.getConstantOperandVal(1); 3798 3799 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; 3800 if (ShAmt >= Width && !isPowerOf2_64(ShAmt)) 3801 return None; 3802 // If we don't have enough masks for 64 bit, then we must be trying to 3803 // match SHFL so we're only allowed to shift 1/4 of the width. 3804 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2)) 3805 return None; 3806 3807 SDValue Src = Op.getOperand(0); 3808 3809 // The expected mask is shifted left when the AND is found around SHL 3810 // patterns. 3811 // ((x >> 1) & 0x55555555) 3812 // ((x << 1) & 0xAAAAAAAA) 3813 bool SHLExpMask = IsSHL; 3814 3815 if (!Mask) { 3816 // Sometimes LLVM keeps the mask as an operand of the shift, typically when 3817 // the mask is all ones: consume that now. 3818 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) { 3819 Mask = Src.getConstantOperandVal(1); 3820 Src = Src.getOperand(0); 3821 // The expected mask is now in fact shifted left for SRL, so reverse the 3822 // decision. 3823 // ((x & 0xAAAAAAAA) >> 1) 3824 // ((x & 0x55555555) << 1) 3825 SHLExpMask = !SHLExpMask; 3826 } else { 3827 // Use a default shifted mask of all-ones if there's no AND, truncated 3828 // down to the expected width. This simplifies the logic later on. 3829 Mask = maskTrailingOnes<uint64_t>(Width); 3830 *Mask &= (IsSHL ? 
*Mask << ShAmt : *Mask >> ShAmt); 3831 } 3832 } 3833 3834 unsigned MaskIdx = Log2_32(ShAmt); 3835 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 3836 3837 if (SHLExpMask) 3838 ExpMask <<= ShAmt; 3839 3840 if (Mask != ExpMask) 3841 return None; 3842 3843 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL}; 3844 } 3845 3846 // Matches any of the following bit-manipulation patterns: 3847 // (and (shl x, 1), (0x55555555 << 1)) 3848 // (and (srl x, 1), 0x55555555) 3849 // (shl (and x, 0x55555555), 1) 3850 // (srl (and x, (0x55555555 << 1)), 1) 3851 // where the shift amount and mask may vary thus: 3852 // [1] = 0x55555555 / 0xAAAAAAAA 3853 // [2] = 0x33333333 / 0xCCCCCCCC 3854 // [4] = 0x0F0F0F0F / 0xF0F0F0F0 3855 // [8] = 0x00FF00FF / 0xFF00FF00 3856 // [16] = 0x0000FFFF / 0xFFFFFFFF 3857 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64) 3858 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) { 3859 // These are the unshifted masks which we use to match bit-manipulation 3860 // patterns. They may be shifted left in certain circumstances. 3861 static const uint64_t BitmanipMasks[] = { 3862 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 3863 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; 3864 3865 return matchRISCVBitmanipPat(Op, BitmanipMasks); 3866 } 3867 3868 // Match the following pattern as a GREVI(W) operation 3869 // (or (BITMANIP_SHL x), (BITMANIP_SRL x)) 3870 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG, 3871 const RISCVSubtarget &Subtarget) { 3872 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 3873 EVT VT = Op.getValueType(); 3874 3875 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 3876 auto LHS = matchGREVIPat(Op.getOperand(0)); 3877 auto RHS = matchGREVIPat(Op.getOperand(1)); 3878 if (LHS && RHS && LHS->formsPairWith(*RHS)) { 3879 SDLoc DL(Op); 3880 return DAG.getNode( 3881 RISCVISD::GREVI, DL, VT, LHS->Op, 3882 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); 3883 } 3884 } 3885 return SDValue(); 3886 } 3887 3888 // Matches any the following pattern as a GORCI(W) operation 3889 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2 3890 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2 3891 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x)) 3892 // Note that with the variant of 3., 3893 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x) 3894 // the inner pattern will first be matched as GREVI and then the outer 3895 // pattern will be matched to GORC via the first rule above. 3896 // 4. (or (rotl/rotr x, bitwidth/2), x) 3897 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG, 3898 const RISCVSubtarget &Subtarget) { 3899 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 3900 EVT VT = Op.getValueType(); 3901 3902 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 3903 SDLoc DL(Op); 3904 SDValue Op0 = Op.getOperand(0); 3905 SDValue Op1 = Op.getOperand(1); 3906 3907 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) { 3908 if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X && 3909 isPowerOf2_32(Reverse.getConstantOperandVal(1))) 3910 return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1)); 3911 // We can also form GORCI from ROTL/ROTR by half the bitwidth. 
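      // For example (illustrative, i32): in (or (rotl x, 16), x), bit i of
      // the rotated value is x[i ^ 16], so the OR computes x[i] | x[i ^ 16]
      // for every bit, which is exactly (GORCI x, 16).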
3912 if ((Reverse.getOpcode() == ISD::ROTL || 3913 Reverse.getOpcode() == ISD::ROTR) && 3914 Reverse.getOperand(0) == X && 3915 isa<ConstantSDNode>(Reverse.getOperand(1))) { 3916 uint64_t RotAmt = Reverse.getConstantOperandVal(1); 3917 if (RotAmt == (VT.getSizeInBits() / 2)) 3918 return DAG.getNode( 3919 RISCVISD::GORCI, DL, VT, X, 3920 DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT())); 3921 } 3922 return SDValue(); 3923 }; 3924 3925 // Check for either commutable permutation of (or (GREVI x, shamt), x) 3926 if (SDValue V = MatchOROfReverse(Op0, Op1)) 3927 return V; 3928 if (SDValue V = MatchOROfReverse(Op1, Op0)) 3929 return V; 3930 3931 // OR is commutable so canonicalize its OR operand to the left 3932 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR) 3933 std::swap(Op0, Op1); 3934 if (Op0.getOpcode() != ISD::OR) 3935 return SDValue(); 3936 SDValue OrOp0 = Op0.getOperand(0); 3937 SDValue OrOp1 = Op0.getOperand(1); 3938 auto LHS = matchGREVIPat(OrOp0); 3939 // OR is commutable so swap the operands and try again: x might have been 3940 // on the left 3941 if (!LHS) { 3942 std::swap(OrOp0, OrOp1); 3943 LHS = matchGREVIPat(OrOp0); 3944 } 3945 auto RHS = matchGREVIPat(Op1); 3946 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) { 3947 return DAG.getNode( 3948 RISCVISD::GORCI, DL, VT, LHS->Op, 3949 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); 3950 } 3951 } 3952 return SDValue(); 3953 } 3954 3955 // Matches any of the following bit-manipulation patterns: 3956 // (and (shl x, 1), (0x22222222 << 1)) 3957 // (and (srl x, 1), 0x22222222) 3958 // (shl (and x, 0x22222222), 1) 3959 // (srl (and x, (0x22222222 << 1)), 1) 3960 // where the shift amount and mask may vary thus: 3961 // [1] = 0x22222222 / 0x44444444 3962 // [2] = 0x0C0C0C0C / 0x3C3C3C3C 3963 // [4] = 0x00F000F0 / 0x0F000F00 3964 // [8] = 0x0000FF00 / 0x00FF0000 3965 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64) 3966 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) { 3967 // These are the unshifted masks which we use to match bit-manipulation 3968 // patterns. They may be shifted left in certain circumstances. 3969 static const uint64_t BitmanipMasks[] = { 3970 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL, 3971 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL}; 3972 3973 return matchRISCVBitmanipPat(Op, BitmanipMasks); 3974 } 3975 3976 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x) 3977 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG, 3978 const RISCVSubtarget &Subtarget) { 3979 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 3980 EVT VT = Op.getValueType(); 3981 3982 if (VT != MVT::i32 && VT != Subtarget.getXLenVT()) 3983 return SDValue(); 3984 3985 SDValue Op0 = Op.getOperand(0); 3986 SDValue Op1 = Op.getOperand(1); 3987 3988 // Or is commutable so canonicalize the second OR to the LHS. 3989 if (Op0.getOpcode() != ISD::OR) 3990 std::swap(Op0, Op1); 3991 if (Op0.getOpcode() != ISD::OR) 3992 return SDValue(); 3993 3994 // We found an inner OR, so our operands are the operands of the inner OR 3995 // and the other operand of the outer OR. 3996 SDValue A = Op0.getOperand(0); 3997 SDValue B = Op0.getOperand(1); 3998 SDValue C = Op1; 3999 4000 auto Match1 = matchSHFLPat(A); 4001 auto Match2 = matchSHFLPat(B); 4002 4003 // If neither matched, we failed. 4004 if (!Match1 && !Match2) 4005 return SDValue(); 4006 4007 // We had at least one match. if one failed, try the remaining C operand. 
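  // For example (illustrative, i32): (SHFLI x, 1) is matched from
  //   (or (or (and (shl x, 1), 0x44444444),
  //           (and (srl x, 1), 0x22222222)),
  //       (and x, 0x99999999))
  // where the third AND keeps the bits that the shuffle leaves in place.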
4008 if (!Match1) { 4009 std::swap(A, C); 4010 Match1 = matchSHFLPat(A); 4011 if (!Match1) 4012 return SDValue(); 4013 } else if (!Match2) { 4014 std::swap(B, C); 4015 Match2 = matchSHFLPat(B); 4016 if (!Match2) 4017 return SDValue(); 4018 } 4019 assert(Match1 && Match2); 4020 4021 // Make sure our matches pair up. 4022 if (!Match1->formsPairWith(*Match2)) 4023 return SDValue(); 4024 4025 // All the remains is to make sure C is an AND with the same input, that masks 4026 // out the bits that are being shuffled. 4027 if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) || 4028 C.getOperand(0) != Match1->Op) 4029 return SDValue(); 4030 4031 uint64_t Mask = C.getConstantOperandVal(1); 4032 4033 static const uint64_t BitmanipMasks[] = { 4034 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL, 4035 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL, 4036 }; 4037 4038 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; 4039 unsigned MaskIdx = Log2_32(Match1->ShAmt); 4040 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 4041 4042 if (Mask != ExpMask) 4043 return SDValue(); 4044 4045 SDLoc DL(Op); 4046 return DAG.getNode( 4047 RISCVISD::SHFLI, DL, VT, Match1->Op, 4048 DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT())); 4049 } 4050 4051 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is 4052 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself. 4053 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does 4054 // not undo itself, but they are redundant. 4055 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) { 4056 unsigned ShAmt1 = N->getConstantOperandVal(1); 4057 SDValue Src = N->getOperand(0); 4058 4059 if (Src.getOpcode() != N->getOpcode()) 4060 return SDValue(); 4061 4062 unsigned ShAmt2 = Src.getConstantOperandVal(1); 4063 Src = Src.getOperand(0); 4064 4065 unsigned CombinedShAmt; 4066 if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW) 4067 CombinedShAmt = ShAmt1 | ShAmt2; 4068 else 4069 CombinedShAmt = ShAmt1 ^ ShAmt2; 4070 4071 if (CombinedShAmt == 0) 4072 return Src; 4073 4074 SDLoc DL(N); 4075 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src, 4076 DAG.getTargetConstant(CombinedShAmt, DL, 4077 N->getOperand(1).getValueType())); 4078 } 4079 4080 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 4081 DAGCombinerInfo &DCI) const { 4082 SelectionDAG &DAG = DCI.DAG; 4083 4084 switch (N->getOpcode()) { 4085 default: 4086 break; 4087 case RISCVISD::SplitF64: { 4088 SDValue Op0 = N->getOperand(0); 4089 // If the input to SplitF64 is just BuildPairF64 then the operation is 4090 // redundant. Instead, use BuildPairF64's operands directly. 4091 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 4092 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 4093 4094 SDLoc DL(N); 4095 4096 // It's cheaper to materialise two 32-bit integers than to load a double 4097 // from the constant pool and transfer it to integer registers through the 4098 // stack. 4099 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { 4100 APInt V = C->getValueAPF().bitcastToAPInt(); 4101 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); 4102 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); 4103 return DCI.CombineTo(N, Lo, Hi); 4104 } 4105 4106 // This is a target-specific version of a DAGCombine performed in 4107 // DAGCombiner::visitBITCAST. 
It performs the equivalent of: 4108 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 4109 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 4110 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 4111 !Op0.getNode()->hasOneUse()) 4112 break; 4113 SDValue NewSplitF64 = 4114 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), 4115 Op0.getOperand(0)); 4116 SDValue Lo = NewSplitF64.getValue(0); 4117 SDValue Hi = NewSplitF64.getValue(1); 4118 APInt SignBit = APInt::getSignMask(32); 4119 if (Op0.getOpcode() == ISD::FNEG) { 4120 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, 4121 DAG.getConstant(SignBit, DL, MVT::i32)); 4122 return DCI.CombineTo(N, Lo, NewHi); 4123 } 4124 assert(Op0.getOpcode() == ISD::FABS); 4125 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, 4126 DAG.getConstant(~SignBit, DL, MVT::i32)); 4127 return DCI.CombineTo(N, Lo, NewHi); 4128 } 4129 case RISCVISD::SLLW: 4130 case RISCVISD::SRAW: 4131 case RISCVISD::SRLW: 4132 case RISCVISD::ROLW: 4133 case RISCVISD::RORW: { 4134 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 4135 SDValue LHS = N->getOperand(0); 4136 SDValue RHS = N->getOperand(1); 4137 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32); 4138 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5); 4139 if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) || 4140 SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) { 4141 if (N->getOpcode() != ISD::DELETED_NODE) 4142 DCI.AddToWorklist(N); 4143 return SDValue(N, 0); 4144 } 4145 break; 4146 } 4147 case RISCVISD::FSL: 4148 case RISCVISD::FSR: { 4149 // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read. 4150 SDValue ShAmt = N->getOperand(2); 4151 unsigned BitWidth = ShAmt.getValueSizeInBits(); 4152 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); 4153 APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1); 4154 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { 4155 if (N->getOpcode() != ISD::DELETED_NODE) 4156 DCI.AddToWorklist(N); 4157 return SDValue(N, 0); 4158 } 4159 break; 4160 } 4161 case RISCVISD::FSLW: 4162 case RISCVISD::FSRW: { 4163 // Only the lower 32 bits of Values and lower 6 bits of shift amount are 4164 // read. 4165 SDValue Op0 = N->getOperand(0); 4166 SDValue Op1 = N->getOperand(1); 4167 SDValue ShAmt = N->getOperand(2); 4168 APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); 4169 APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6); 4170 if (SimplifyDemandedBits(Op0, OpMask, DCI) || 4171 SimplifyDemandedBits(Op1, OpMask, DCI) || 4172 SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { 4173 if (N->getOpcode() != ISD::DELETED_NODE) 4174 DCI.AddToWorklist(N); 4175 return SDValue(N, 0); 4176 } 4177 break; 4178 } 4179 case RISCVISD::GREVIW: 4180 case RISCVISD::GORCIW: { 4181 // Only the lower 32 bits of the first operand are read 4182 SDValue Op0 = N->getOperand(0); 4183 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); 4184 if (SimplifyDemandedBits(Op0, Mask, DCI)) { 4185 if (N->getOpcode() != ISD::DELETED_NODE) 4186 DCI.AddToWorklist(N); 4187 return SDValue(N, 0); 4188 } 4189 4190 return combineGREVI_GORCI(N, DCI.DAG); 4191 } 4192 case RISCVISD::FMV_X_ANYEXTW_RV64: { 4193 SDLoc DL(N); 4194 SDValue Op0 = N->getOperand(0); 4195 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the 4196 // conversion is unnecessary and can be replaced with an ANY_EXTEND 4197 // of the FMV_W_X_RV64 operand. 
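    // In other words (illustrative): (FMV_X_ANYEXTW_RV64 (FMV_W_X_RV64 x))
    // folds to x, removing the round trip through the FP register file.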
4198 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { 4199 assert(Op0.getOperand(0).getValueType() == MVT::i64 && 4200 "Unexpected value type!"); 4201 return Op0.getOperand(0); 4202 } 4203 4204 // This is a target-specific version of a DAGCombine performed in 4205 // DAGCombiner::visitBITCAST. It performs the equivalent of: 4206 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 4207 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 4208 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 4209 !Op0.getNode()->hasOneUse()) 4210 break; 4211 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 4212 Op0.getOperand(0)); 4213 APInt SignBit = APInt::getSignMask(32).sext(64); 4214 if (Op0.getOpcode() == ISD::FNEG) 4215 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 4216 DAG.getConstant(SignBit, DL, MVT::i64)); 4217 4218 assert(Op0.getOpcode() == ISD::FABS); 4219 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 4220 DAG.getConstant(~SignBit, DL, MVT::i64)); 4221 } 4222 case RISCVISD::GREVI: 4223 case RISCVISD::GORCI: 4224 return combineGREVI_GORCI(N, DCI.DAG); 4225 case ISD::OR: 4226 if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) 4227 return GREV; 4228 if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) 4229 return GORC; 4230 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget)) 4231 return SHFL; 4232 break; 4233 case RISCVISD::SELECT_CC: { 4234 // Transform 4235 SDValue LHS = N->getOperand(0); 4236 SDValue RHS = N->getOperand(1); 4237 auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); 4238 if (!ISD::isIntEqualitySetCC(CCVal)) 4239 break; 4240 4241 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> 4242 // (select_cc X, Y, lt, trueV, falseV) 4243 // Sometimes the setcc is introduced after select_cc has been formed. 4244 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 4245 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 4246 // If we're looking for eq 0 instead of ne 0, we need to invert the 4247 // condition. 4248 bool Invert = CCVal == ISD::SETEQ; 4249 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 4250 if (Invert) 4251 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 4252 4253 SDLoc DL(N); 4254 RHS = LHS.getOperand(1); 4255 LHS = LHS.getOperand(0); 4256 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 4257 4258 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 4259 return DAG.getNode( 4260 RISCVISD::SELECT_CC, DL, N->getValueType(0), 4261 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 4262 } 4263 4264 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> 4265 // (select_cc X, Y, eq/ne, trueV, falseV) 4266 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 4267 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), 4268 {LHS.getOperand(0), LHS.getOperand(1), 4269 N->getOperand(2), N->getOperand(3), 4270 N->getOperand(4)}); 4271 // (select_cc X, 1, setne, trueV, falseV) -> 4272 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 4273 // This can occur when legalizing some floating point comparisons. 
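    // For example (illustrative): if X here is the 0/1 result of a SETCC,
    // testing X != 1 is equivalent to testing X == 0, and a zero RHS can use
    // the x0 register instead of materialising the constant 1.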
4274 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 4275 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) { 4276 SDLoc DL(N); 4277 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 4278 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 4279 RHS = DAG.getConstant(0, DL, LHS.getValueType()); 4280 return DAG.getNode( 4281 RISCVISD::SELECT_CC, DL, N->getValueType(0), 4282 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 4283 } 4284 4285 break; 4286 } 4287 case RISCVISD::BR_CC: { 4288 SDValue LHS = N->getOperand(1); 4289 SDValue RHS = N->getOperand(2); 4290 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get(); 4291 if (!ISD::isIntEqualitySetCC(CCVal)) 4292 break; 4293 4294 // Fold (br_cc (setlt X, Y), 0, ne, dest) -> 4295 // (br_cc X, Y, lt, dest) 4296 // Sometimes the setcc is introduced after br_cc has been formed. 4297 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 4298 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 4299 // If we're looking for eq 0 instead of ne 0, we need to invert the 4300 // condition. 4301 bool Invert = CCVal == ISD::SETEQ; 4302 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 4303 if (Invert) 4304 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 4305 4306 SDLoc DL(N); 4307 RHS = LHS.getOperand(1); 4308 LHS = LHS.getOperand(0); 4309 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 4310 4311 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0), 4312 N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal), 4313 N->getOperand(4)); 4314 } 4315 4316 // Fold (br_cc (xor X, Y), 0, eq/ne, dest) -> 4317 // (br_cc X, Y, eq/ne, trueV, falseV) 4318 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 4319 return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0), 4320 N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1), 4321 N->getOperand(3), N->getOperand(4)); 4322 4323 // (br_cc X, 1, setne, br_cc) -> 4324 // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1. 4325 // This can occur when legalizing some floating point comparisons. 4326 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 4327 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) { 4328 SDLoc DL(N); 4329 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 4330 SDValue TargetCC = DAG.getCondCode(CCVal); 4331 RHS = DAG.getConstant(0, DL, LHS.getValueType()); 4332 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0), 4333 N->getOperand(0), LHS, RHS, TargetCC, 4334 N->getOperand(4)); 4335 } 4336 break; 4337 } 4338 case ISD::FCOPYSIGN: { 4339 EVT VT = N->getValueType(0); 4340 if (!VT.isVector()) 4341 break; 4342 // There is a form of VFSGNJ which injects the negated sign of its second 4343 // operand. Try and bubble any FNEG up after the extend/round to produce 4344 // this optimized pattern. Avoid modifying cases where FP_ROUND and 4345 // TRUNC=1. 4346 SDValue In2 = N->getOperand(1); 4347 // Avoid cases where the extend/round has multiple uses, as duplicating 4348 // those is typically more expensive than removing a fneg. 
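    // For example (illustrative): (fcopysign X, (fpext (fneg Y))) becomes
    // (fcopysign X, (fneg (fpext Y))), which can later be selected as a
    // negated sign injection (vfsgnjn) rather than a separate negate.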
4349 if (!In2.hasOneUse()) 4350 break; 4351 if (In2.getOpcode() != ISD::FP_EXTEND && 4352 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0)) 4353 break; 4354 In2 = In2.getOperand(0); 4355 if (In2.getOpcode() != ISD::FNEG) 4356 break; 4357 SDLoc DL(N); 4358 SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT); 4359 return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0), 4360 DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound)); 4361 } 4362 } 4363 4364 return SDValue(); 4365 } 4366 4367 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 4368 const SDNode *N, CombineLevel Level) const { 4369 // The following folds are only desirable if `(OP _, c1 << c2)` can be 4370 // materialised in fewer instructions than `(OP _, c1)`: 4371 // 4372 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 4373 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 4374 SDValue N0 = N->getOperand(0); 4375 EVT Ty = N0.getValueType(); 4376 if (Ty.isScalarInteger() && 4377 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 4378 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 4379 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 4380 if (C1 && C2) { 4381 const APInt &C1Int = C1->getAPIntValue(); 4382 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 4383 4384 // We can materialise `c1 << c2` into an add immediate, so it's "free", 4385 // and the combine should happen, to potentially allow further combines 4386 // later. 4387 if (ShiftedC1Int.getMinSignedBits() <= 64 && 4388 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 4389 return true; 4390 4391 // We can materialise `c1` in an add immediate, so it's "free", and the 4392 // combine should be prevented. 4393 if (C1Int.getMinSignedBits() <= 64 && 4394 isLegalAddImmediate(C1Int.getSExtValue())) 4395 return false; 4396 4397 // Neither constant will fit into an immediate, so find materialisation 4398 // costs. 4399 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 4400 Subtarget.is64Bit()); 4401 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 4402 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit()); 4403 4404 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 4405 // combine should be prevented. 4406 if (C1Cost < ShiftedC1Cost) 4407 return false; 4408 } 4409 } 4410 return true; 4411 } 4412 4413 bool RISCVTargetLowering::targetShrinkDemandedConstant( 4414 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 4415 TargetLoweringOpt &TLO) const { 4416 // Delay this optimization as late as possible. 4417 if (!TLO.LegalOps) 4418 return false; 4419 4420 EVT VT = Op.getValueType(); 4421 if (VT.isVector()) 4422 return false; 4423 4424 // Only handle AND for now. 4425 if (Op.getOpcode() != ISD::AND) 4426 return false; 4427 4428 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 4429 if (!C) 4430 return false; 4431 4432 const APInt &Mask = C->getAPIntValue(); 4433 4434 // Clear all non-demanded bits initially. 4435 APInt ShrunkMask = Mask & DemandedBits; 4436 4437 // If the shrunk mask fits in sign extended 12 bits, let the target 4438 // independent code apply it. 4439 if (ShrunkMask.isSignedIntN(12)) 4440 return false; 4441 4442 // Try to make a smaller immediate by setting undemanded bits. 4443 4444 // We need to be able to make a negative number through a combination of mask 4445 // and undemanded bits. 
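// A rough worked example (values purely illustrative): for
// (and X:i64, 0x00000000FFFFFFF0) with only the low 32 bits demanded, the
// shrunk mask 0xFFFFFFF0 does not fit in a simm12, but OR-ing in the
// undemanded upper bits gives 0xFFFFFFFFFFFFFFF0 (-16), which does, so the
// AND can be selected as a single ANDI.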
4446 APInt ExpandedMask = Mask | ~DemandedBits; 4447 if (!ExpandedMask.isNegative()) 4448 return false; 4449 4450 // What is the fewest number of bits we need to represent the negative number. 4451 unsigned MinSignedBits = ExpandedMask.getMinSignedBits(); 4452 4453 // Try to make a 12 bit negative immediate. If that fails try to make a 32 4454 // bit negative immediate unless the shrunk immediate already fits in 32 bits. 4455 APInt NewMask = ShrunkMask; 4456 if (MinSignedBits <= 12) 4457 NewMask.setBitsFrom(11); 4458 else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32)) 4459 NewMask.setBitsFrom(31); 4460 else 4461 return false; 4462 4463 // Sanity check that our new mask is a subset of the demanded mask. 4464 assert(NewMask.isSubsetOf(ExpandedMask)); 4465 4466 // If we aren't changing the mask, just return true to keep it and prevent 4467 // the caller from optimizing. 4468 if (NewMask == Mask) 4469 return true; 4470 4471 // Replace the constant with the new mask. 4472 SDLoc DL(Op); 4473 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); 4474 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); 4475 return TLO.CombineTo(Op, NewOp); 4476 } 4477 4478 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 4479 KnownBits &Known, 4480 const APInt &DemandedElts, 4481 const SelectionDAG &DAG, 4482 unsigned Depth) const { 4483 unsigned BitWidth = Known.getBitWidth(); 4484 unsigned Opc = Op.getOpcode(); 4485 assert((Opc >= ISD::BUILTIN_OP_END || 4486 Opc == ISD::INTRINSIC_WO_CHAIN || 4487 Opc == ISD::INTRINSIC_W_CHAIN || 4488 Opc == ISD::INTRINSIC_VOID) && 4489 "Should use MaskedValueIsZero if you don't know whether Op" 4490 " is a target node!"); 4491 4492 Known.resetAll(); 4493 switch (Opc) { 4494 default: break; 4495 case RISCVISD::SELECT_CC: { 4496 Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1); 4497 // If we don't know any bits, early out. 4498 if (Known.isUnknown()) 4499 break; 4500 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1); 4501 4502 // Only known if known in both the LHS and RHS. 4503 Known = KnownBits::commonBits(Known, Known2); 4504 break; 4505 } 4506 case RISCVISD::REMUW: { 4507 KnownBits Known2; 4508 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 4509 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 4510 // We only care about the lower 32 bits. 4511 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); 4512 // Restore the original width by sign extending. 4513 Known = Known.sext(BitWidth); 4514 break; 4515 } 4516 case RISCVISD::DIVUW: { 4517 KnownBits Known2; 4518 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 4519 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 4520 // We only care about the lower 32 bits. 4521 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); 4522 // Restore the original width by sign extending. 4523 Known = Known.sext(BitWidth); 4524 break; 4525 } 4526 case RISCVISD::READ_VLENB: 4527 // We assume VLENB is at least 8 bytes. 4528 // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits. 
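// Since VLEN is a power of two and is assumed to be at least 64 bits here,
// VLENB (VLEN / 8) is a multiple of 8, so its low 3 bits are known zero.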
4529 Known.Zero.setLowBits(3); 4530 break; 4531 } 4532 } 4533 4534 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 4535 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4536 unsigned Depth) const { 4537 switch (Op.getOpcode()) { 4538 default: 4539 break; 4540 case RISCVISD::SLLW: 4541 case RISCVISD::SRAW: 4542 case RISCVISD::SRLW: 4543 case RISCVISD::DIVW: 4544 case RISCVISD::DIVUW: 4545 case RISCVISD::REMUW: 4546 case RISCVISD::ROLW: 4547 case RISCVISD::RORW: 4548 case RISCVISD::GREVIW: 4549 case RISCVISD::GORCIW: 4550 case RISCVISD::FSLW: 4551 case RISCVISD::FSRW: 4552 // TODO: As the result is sign-extended, this is conservatively correct. A 4553 // more precise answer could be calculated for SRAW depending on known 4554 // bits in the shift amount. 4555 return 33; 4556 case RISCVISD::SHFLI: { 4557 // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word 4558 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but 4559 // will stay within the upper 32 bits. If there were more than 32 sign bits 4560 // before there will be at least 33 sign bits after. 4561 if (Op.getValueType() == MVT::i64 && 4562 (Op.getConstantOperandVal(1) & 0x10) == 0) { 4563 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 4564 if (Tmp > 32) 4565 return 33; 4566 } 4567 break; 4568 } 4569 case RISCVISD::VMV_X_S: 4570 // The number of sign bits of the scalar result is computed by obtaining the 4571 // element type of the input vector operand, subtracting its width from the 4572 // XLEN, and then adding one (sign bit within the element type). If the 4573 // element type is wider than XLen, the least-significant XLEN bits are 4574 // taken. 4575 if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) 4576 return 1; 4577 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; 4578 } 4579 4580 return 1; 4581 } 4582 4583 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 4584 MachineBasicBlock *BB) { 4585 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 4586 4587 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 4588 // Should the count have wrapped while it was being read, we need to try 4589 // again. 4590 // ... 4591 // read: 4592 // rdcycleh x3 # load high word of cycle 4593 // rdcycle x2 # load low word of cycle 4594 // rdcycleh x4 # load high word of cycle 4595 // bne x3, x4, read # check if high word reads match, otherwise try again 4596 // ... 4597 4598 MachineFunction &MF = *BB->getParent(); 4599 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4600 MachineFunction::iterator It = ++BB->getIterator(); 4601 4602 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 4603 MF.insert(It, LoopMBB); 4604 4605 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 4606 MF.insert(It, DoneMBB); 4607 4608 // Transfer the remainder of BB and its successor edges to DoneMBB. 
4609 DoneMBB->splice(DoneMBB->begin(), BB, 4610 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 4611 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 4612 4613 BB->addSuccessor(LoopMBB); 4614 4615 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4616 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 4617 Register LoReg = MI.getOperand(0).getReg(); 4618 Register HiReg = MI.getOperand(1).getReg(); 4619 DebugLoc DL = MI.getDebugLoc(); 4620 4621 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 4622 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 4623 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 4624 .addReg(RISCV::X0); 4625 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 4626 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 4627 .addReg(RISCV::X0); 4628 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 4629 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 4630 .addReg(RISCV::X0); 4631 4632 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 4633 .addReg(HiReg) 4634 .addReg(ReadAgainReg) 4635 .addMBB(LoopMBB); 4636 4637 LoopMBB->addSuccessor(LoopMBB); 4638 LoopMBB->addSuccessor(DoneMBB); 4639 4640 MI.eraseFromParent(); 4641 4642 return DoneMBB; 4643 } 4644 4645 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 4646 MachineBasicBlock *BB) { 4647 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 4648 4649 MachineFunction &MF = *BB->getParent(); 4650 DebugLoc DL = MI.getDebugLoc(); 4651 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4652 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 4653 Register LoReg = MI.getOperand(0).getReg(); 4654 Register HiReg = MI.getOperand(1).getReg(); 4655 Register SrcReg = MI.getOperand(2).getReg(); 4656 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 4657 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 4658 4659 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 4660 RI); 4661 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 4662 MachineMemOperand *MMOLo = 4663 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 4664 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 4665 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 4666 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 4667 .addFrameIndex(FI) 4668 .addImm(0) 4669 .addMemOperand(MMOLo); 4670 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 4671 .addFrameIndex(FI) 4672 .addImm(4) 4673 .addMemOperand(MMOHi); 4674 MI.eraseFromParent(); // The pseudo instruction is gone now. 
4675 return BB; 4676 } 4677 4678 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 4679 MachineBasicBlock *BB) { 4680 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 4681 "Unexpected instruction"); 4682 4683 MachineFunction &MF = *BB->getParent(); 4684 DebugLoc DL = MI.getDebugLoc(); 4685 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4686 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 4687 Register DstReg = MI.getOperand(0).getReg(); 4688 Register LoReg = MI.getOperand(1).getReg(); 4689 Register HiReg = MI.getOperand(2).getReg(); 4690 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 4691 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 4692 4693 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 4694 MachineMemOperand *MMOLo = 4695 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 4696 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 4697 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 4698 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 4699 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 4700 .addFrameIndex(FI) 4701 .addImm(0) 4702 .addMemOperand(MMOLo); 4703 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 4704 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 4705 .addFrameIndex(FI) 4706 .addImm(4) 4707 .addMemOperand(MMOHi); 4708 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 4709 MI.eraseFromParent(); // The pseudo instruction is gone now. 4710 return BB; 4711 } 4712 4713 static bool isSelectPseudo(MachineInstr &MI) { 4714 switch (MI.getOpcode()) { 4715 default: 4716 return false; 4717 case RISCV::Select_GPR_Using_CC_GPR: 4718 case RISCV::Select_FPR16_Using_CC_GPR: 4719 case RISCV::Select_FPR32_Using_CC_GPR: 4720 case RISCV::Select_FPR64_Using_CC_GPR: 4721 return true; 4722 } 4723 } 4724 4725 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 4726 MachineBasicBlock *BB) { 4727 // To "insert" Select_* instructions, we actually have to insert the triangle 4728 // control-flow pattern. The incoming instructions know the destination vreg 4729 // to set, the condition code register to branch on, the true/false values to 4730 // select between, and the condcode to use to select the appropriate branch. 4731 // 4732 // We produce the following control flow: 4733 // HeadMBB 4734 // | \ 4735 // | IfFalseMBB 4736 // | / 4737 // TailMBB 4738 // 4739 // When we find a sequence of selects we attempt to optimize their emission 4740 // by sharing the control flow. Currently we only handle cases where we have 4741 // multiple selects with the exact same condition (same LHS, RHS and CC). 4742 // The selects may be interleaved with other instructions if the other 4743 // instructions meet some requirements we deem safe: 4744 // - They are debug instructions. Otherwise, 4745 // - They do not have side-effects, do not access memory and their inputs do 4746 // not depend on the results of the select pseudo-instructions. 4747 // The TrueV/FalseV operands of the selects cannot depend on the result of 4748 // previous selects in the sequence. 4749 // These conditions could be further relaxed. See the X86 target for a 4750 // related approach and more information. 
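// As a rough illustration (virtual register names are arbitrary), a single
//   %dst = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %trueV, %falseV
// is expanded to something like:
//   HeadMBB:
//     B<cc> %lhs, %rhs, TailMBB
//   IfFalseMBB:
//     ; fall through
//   TailMBB:
//     %dst = PHI [ %trueV, HeadMBB ], [ %falseV, IfFalseMBB ]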
4751 Register LHS = MI.getOperand(1).getReg(); 4752 Register RHS = MI.getOperand(2).getReg(); 4753 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 4754 4755 SmallVector<MachineInstr *, 4> SelectDebugValues; 4756 SmallSet<Register, 4> SelectDests; 4757 SelectDests.insert(MI.getOperand(0).getReg()); 4758 4759 MachineInstr *LastSelectPseudo = &MI; 4760 4761 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 4762 SequenceMBBI != E; ++SequenceMBBI) { 4763 if (SequenceMBBI->isDebugInstr()) 4764 continue; 4765 else if (isSelectPseudo(*SequenceMBBI)) { 4766 if (SequenceMBBI->getOperand(1).getReg() != LHS || 4767 SequenceMBBI->getOperand(2).getReg() != RHS || 4768 SequenceMBBI->getOperand(3).getImm() != CC || 4769 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 4770 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 4771 break; 4772 LastSelectPseudo = &*SequenceMBBI; 4773 SequenceMBBI->collectDebugValues(SelectDebugValues); 4774 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 4775 } else { 4776 if (SequenceMBBI->hasUnmodeledSideEffects() || 4777 SequenceMBBI->mayLoadOrStore()) 4778 break; 4779 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 4780 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 4781 })) 4782 break; 4783 } 4784 } 4785 4786 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 4787 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4788 DebugLoc DL = MI.getDebugLoc(); 4789 MachineFunction::iterator I = ++BB->getIterator(); 4790 4791 MachineBasicBlock *HeadMBB = BB; 4792 MachineFunction *F = BB->getParent(); 4793 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 4794 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 4795 4796 F->insert(I, IfFalseMBB); 4797 F->insert(I, TailMBB); 4798 4799 // Transfer debug instructions associated with the selects to TailMBB. 4800 for (MachineInstr *DebugInstr : SelectDebugValues) { 4801 TailMBB->push_back(DebugInstr->removeFromParent()); 4802 } 4803 4804 // Move all instructions after the sequence to TailMBB. 4805 TailMBB->splice(TailMBB->end(), HeadMBB, 4806 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 4807 // Update machine-CFG edges by transferring all successors of the current 4808 // block to the new block which will contain the Phi nodes for the selects. 4809 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 4810 // Set the successors for HeadMBB. 4811 HeadMBB->addSuccessor(IfFalseMBB); 4812 HeadMBB->addSuccessor(TailMBB); 4813 4814 // Insert appropriate branch. 4815 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 4816 4817 BuildMI(HeadMBB, DL, TII.get(Opcode)) 4818 .addReg(LHS) 4819 .addReg(RHS) 4820 .addMBB(TailMBB); 4821 4822 // IfFalseMBB just falls through to TailMBB. 4823 IfFalseMBB->addSuccessor(TailMBB); 4824 4825 // Create PHIs for all of the select pseudo-instructions. 
4826 auto SelectMBBI = MI.getIterator(); 4827 auto SelectEnd = std::next(LastSelectPseudo->getIterator()); 4828 auto InsertionPoint = TailMBB->begin(); 4829 while (SelectMBBI != SelectEnd) { 4830 auto Next = std::next(SelectMBBI); 4831 if (isSelectPseudo(*SelectMBBI)) { 4832 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 4833 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), 4834 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) 4835 .addReg(SelectMBBI->getOperand(4).getReg()) 4836 .addMBB(HeadMBB) 4837 .addReg(SelectMBBI->getOperand(5).getReg()) 4838 .addMBB(IfFalseMBB); 4839 SelectMBBI->eraseFromParent(); 4840 } 4841 SelectMBBI = Next; 4842 } 4843 4844 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); 4845 return TailMBB; 4846 } 4847 4848 static MachineInstr *elideCopies(MachineInstr *MI, 4849 const MachineRegisterInfo &MRI) { 4850 while (true) { 4851 if (!MI->isFullCopy()) 4852 return MI; 4853 if (!Register::isVirtualRegister(MI->getOperand(1).getReg())) 4854 return nullptr; 4855 MI = MRI.getVRegDef(MI->getOperand(1).getReg()); 4856 if (!MI) 4857 return nullptr; 4858 } 4859 } 4860 4861 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB, 4862 int VLIndex, unsigned SEWIndex, 4863 RISCVVLMUL VLMul, bool ForceTailAgnostic) { 4864 MachineFunction &MF = *BB->getParent(); 4865 DebugLoc DL = MI.getDebugLoc(); 4866 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4867 4868 unsigned SEW = MI.getOperand(SEWIndex).getImm(); 4869 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); 4870 RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8)); 4871 4872 MachineRegisterInfo &MRI = MF.getRegInfo(); 4873 4874 auto BuildVSETVLI = [&]() { 4875 if (VLIndex >= 0) { 4876 Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass); 4877 Register VLReg = MI.getOperand(VLIndex).getReg(); 4878 4879 // VL might be a compile time constant, but isel would have to put it 4880 // in a register. See if VL comes from an ADDI X0, imm. 4881 if (VLReg.isVirtual()) { 4882 MachineInstr *Def = MRI.getVRegDef(VLReg); 4883 if (Def && Def->getOpcode() == RISCV::ADDI && 4884 Def->getOperand(1).getReg() == RISCV::X0 && 4885 Def->getOperand(2).isImm()) { 4886 uint64_t Imm = Def->getOperand(2).getImm(); 4887 // VSETIVLI allows a 5-bit zero extended immediate. 4888 if (isUInt<5>(Imm)) 4889 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI)) 4890 .addReg(DestReg, RegState::Define | RegState::Dead) 4891 .addImm(Imm); 4892 } 4893 } 4894 4895 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 4896 .addReg(DestReg, RegState::Define | RegState::Dead) 4897 .addReg(VLReg); 4898 } 4899 4900 // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0). 4901 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 4902 .addReg(RISCV::X0, RegState::Define | RegState::Dead) 4903 .addReg(RISCV::X0, RegState::Kill); 4904 }; 4905 4906 MachineInstrBuilder MIB = BuildVSETVLI(); 4907 4908 // Default to tail agnostic unless the destination is tied to a source. In 4909 // that case the user would have some control over the tail values. The tail 4910 // policy is also ignored on instructions that only update element 0 like 4911 // vmv.s.x or reductions so use agnostic there to match the common case. 4912 // FIXME: This is conservatively correct, but we might want to detect that 4913 // the input is undefined. 
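// Roughly speaking: a pseudo with no tied destination uses the tail-agnostic
// ("ta") encoding below, while a pseudo whose destination is tied to a merge
// operand is encoded tail-undisturbed ("tu") instead, unless that merge
// operand turns out to be an IMPLICIT_DEF.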
4914 bool TailAgnostic = true;
4915 unsigned UseOpIdx;
4916 if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
4917 TailAgnostic = false;
4918 // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
4919 const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
4920 MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
4921 if (UseMI) {
4922 UseMI = elideCopies(UseMI, MRI);
4923 if (UseMI && UseMI->isImplicitDef())
4924 TailAgnostic = true;
4925 }
4926 }
4927
4928 // For simplicity we reuse the vtype representation here.
4929 MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
4930 /*TailAgnostic*/ TailAgnostic,
4931 /*MaskAgnostic*/ false));
4932
4933 // Remove (now) redundant operands from the pseudo.
4934 if (VLIndex >= 0) {
4935 MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
4936 MI.getOperand(VLIndex).setIsKill(false);
4937 }
4938
4939 return BB;
4940 }
4941
4942 MachineBasicBlock *
4943 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
4944 MachineBasicBlock *BB) const {
4945 uint64_t TSFlags = MI.getDesc().TSFlags;
4946
4947 if (TSFlags & RISCVII::HasSEWOpMask) {
4948 unsigned NumOperands = MI.getNumExplicitOperands();
4949 int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
4950 unsigned SEWIndex = NumOperands - 1;
4951 bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
4952
4953 RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
4954 RISCVII::VLMulShift);
4955 return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
4956 }
4957
4958 switch (MI.getOpcode()) {
4959 default:
4960 llvm_unreachable("Unexpected instr type to insert");
4961 case RISCV::ReadCycleWide:
4962 assert(!Subtarget.is64Bit() &&
4963 "ReadCycleWide is only to be used on riscv32");
4964 return emitReadCycleWidePseudo(MI, BB);
4965 case RISCV::Select_GPR_Using_CC_GPR:
4966 case RISCV::Select_FPR16_Using_CC_GPR:
4967 case RISCV::Select_FPR32_Using_CC_GPR:
4968 case RISCV::Select_FPR64_Using_CC_GPR:
4969 return emitSelectPseudo(MI, BB);
4970 case RISCV::BuildPairF64Pseudo:
4971 return emitBuildPairF64Pseudo(MI, BB);
4972 case RISCV::SplitF64Pseudo:
4973 return emitSplitF64Pseudo(MI, BB);
4974 }
4975 }
4976
4977 // Calling Convention Implementation.
4978 // The expectations for frontend ABI lowering vary from target to target.
4979 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
4980 // details, but this is a longer term goal. For now, we simply try to keep the
4981 // role of the frontend as simple and well-defined as possible. The rules can
4982 // be summarised as:
4983 // * Never split up large scalar arguments. We handle them here.
4984 // * If a hardfloat calling convention is being used, and the struct may be
4985 // passed in a pair of registers (fp+fp, int+fp), and both registers are
4986 // available, then pass as two separate arguments. If either the GPRs or FPRs
4987 // are exhausted, then pass according to the rule below.
4988 // * If a struct could never be passed in registers or directly in a stack
4989 // slot (as it is larger than 2*XLEN and the floating point rules don't
4990 // apply), then pass it using a pointer with the byval attribute.
4991 // * If a struct is less than 2*XLEN, then coerce to either a two-element
4992 // word-sized array or a 2*XLEN scalar (depending on alignment).
4993 // * The frontend can determine whether a struct is returned by reference or
4994 // not based on its size and fields.
If it will be returned by reference, the 4995 // frontend must modify the prototype so a pointer with the sret annotation is 4996 // passed as the first argument. This is not necessary for large scalar 4997 // returns. 4998 // * Struct return values and varargs should be coerced to structs containing 4999 // register-size fields in the same situations they would be for fixed 5000 // arguments. 5001 5002 static const MCPhysReg ArgGPRs[] = { 5003 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 5004 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 5005 }; 5006 static const MCPhysReg ArgFPR16s[] = { 5007 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 5008 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 5009 }; 5010 static const MCPhysReg ArgFPR32s[] = { 5011 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 5012 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 5013 }; 5014 static const MCPhysReg ArgFPR64s[] = { 5015 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 5016 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 5017 }; 5018 // This is an interim calling convention and it may be changed in the future. 5019 static const MCPhysReg ArgVRs[] = { 5020 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, 5021 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, 5022 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; 5023 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, 5024 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, 5025 RISCV::V20M2, RISCV::V22M2}; 5026 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, 5027 RISCV::V20M4}; 5028 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; 5029 5030 // Pass a 2*XLEN argument that has been split into two XLEN values through 5031 // registers or the stack as necessary. 5032 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 5033 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 5034 MVT ValVT2, MVT LocVT2, 5035 ISD::ArgFlagsTy ArgFlags2) { 5036 unsigned XLenInBytes = XLen / 8; 5037 if (Register Reg = State.AllocateReg(ArgGPRs)) { 5038 // At least one half can be passed via register. 5039 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 5040 VA1.getLocVT(), CCValAssign::Full)); 5041 } else { 5042 // Both halves must be passed on the stack, with proper alignment. 5043 Align StackAlign = 5044 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 5045 State.addLoc( 5046 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 5047 State.AllocateStack(XLenInBytes, StackAlign), 5048 VA1.getLocVT(), CCValAssign::Full)); 5049 State.addLoc(CCValAssign::getMem( 5050 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 5051 LocVT2, CCValAssign::Full)); 5052 return false; 5053 } 5054 5055 if (Register Reg = State.AllocateReg(ArgGPRs)) { 5056 // The second half can also be passed via register. 5057 State.addLoc( 5058 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 5059 } else { 5060 // The second half is passed via the stack, without additional alignment. 5061 State.addLoc(CCValAssign::getMem( 5062 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 5063 LocVT2, CCValAssign::Full)); 5064 } 5065 5066 return false; 5067 } 5068 5069 // Implements the RISC-V calling convention. Returns true upon failure. 
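// An informal example: under the ILP32D ABI a fixed f64 argument is assigned
// to fa0-fa7 while FPR argument registers remain; a variadic f64, or one
// arriving after the FPRs are exhausted, instead follows the GPR rules below
// (a GPR pair, a GPR plus a stack slot, or entirely on the stack on RV32).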
5070 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, 5071 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, 5072 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, 5073 bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, 5074 Optional<unsigned> FirstMaskArgument) { 5075 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); 5076 assert(XLen == 32 || XLen == 64); 5077 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; 5078 5079 // Any return value split in to more than two values can't be returned 5080 // directly. Vectors are returned via the available vector registers. 5081 if (!LocVT.isVector() && IsRet && ValNo > 1) 5082 return true; 5083 5084 // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a 5085 // variadic argument, or if no F16/F32 argument registers are available. 5086 bool UseGPRForF16_F32 = true; 5087 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a 5088 // variadic argument, or if no F64 argument registers are available. 5089 bool UseGPRForF64 = true; 5090 5091 switch (ABI) { 5092 default: 5093 llvm_unreachable("Unexpected ABI"); 5094 case RISCVABI::ABI_ILP32: 5095 case RISCVABI::ABI_LP64: 5096 break; 5097 case RISCVABI::ABI_ILP32F: 5098 case RISCVABI::ABI_LP64F: 5099 UseGPRForF16_F32 = !IsFixed; 5100 break; 5101 case RISCVABI::ABI_ILP32D: 5102 case RISCVABI::ABI_LP64D: 5103 UseGPRForF16_F32 = !IsFixed; 5104 UseGPRForF64 = !IsFixed; 5105 break; 5106 } 5107 5108 // FPR16, FPR32, and FPR64 alias each other. 5109 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { 5110 UseGPRForF16_F32 = true; 5111 UseGPRForF64 = true; 5112 } 5113 5114 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and 5115 // similar local variables rather than directly checking against the target 5116 // ABI. 5117 5118 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { 5119 LocVT = XLenVT; 5120 LocInfo = CCValAssign::BCvt; 5121 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { 5122 LocVT = MVT::i64; 5123 LocInfo = CCValAssign::BCvt; 5124 } 5125 5126 // If this is a variadic argument, the RISC-V calling convention requires 5127 // that it is assigned an 'even' or 'aligned' register if it has 8-byte 5128 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should 5129 // be used regardless of whether the original argument was split during 5130 // legalisation or not. The argument will not be passed by registers if the 5131 // original type is larger than 2*XLEN, so the register alignment rule does 5132 // not apply. 5133 unsigned TwoXLenInBytes = (2 * XLen) / 8; 5134 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && 5135 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { 5136 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); 5137 // Skip 'odd' register if necessary. 5138 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) 5139 State.AllocateReg(ArgGPRs); 5140 } 5141 5142 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); 5143 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = 5144 State.getPendingArgFlags(); 5145 5146 assert(PendingLocs.size() == PendingArgFlags.size() && 5147 "PendingLocs and PendingArgFlags out of sync"); 5148 5149 // Handle passing f64 on RV32D with a soft float ABI or when floating point 5150 // registers are exhausted. 
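// For instance (illustrative), the first such f64 normally lands in the
// a0/a1 pair; if only a7 is left it is split between a7 and the stack, and
// with no GPRs remaining the whole value goes in an 8-byte aligned stack
// slot, as arranged below.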
5151 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 5152 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 5153 "Can't lower f64 if it is split"); 5154 // Depending on available argument GPRS, f64 may be passed in a pair of 5155 // GPRs, split between a GPR and the stack, or passed completely on the 5156 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 5157 // cases. 5158 Register Reg = State.AllocateReg(ArgGPRs); 5159 LocVT = MVT::i32; 5160 if (!Reg) { 5161 unsigned StackOffset = State.AllocateStack(8, Align(8)); 5162 State.addLoc( 5163 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 5164 return false; 5165 } 5166 if (!State.AllocateReg(ArgGPRs)) 5167 State.AllocateStack(4, Align(4)); 5168 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5169 return false; 5170 } 5171 5172 // Fixed-length vectors are located in the corresponding scalable-vector 5173 // container types. 5174 if (ValVT.isFixedLengthVector()) 5175 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 5176 5177 // Split arguments might be passed indirectly, so keep track of the pending 5178 // values. Split vectors are passed via a mix of registers and indirectly, so 5179 // treat them as we would any other argument. 5180 if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) { 5181 LocVT = XLenVT; 5182 LocInfo = CCValAssign::Indirect; 5183 PendingLocs.push_back( 5184 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 5185 PendingArgFlags.push_back(ArgFlags); 5186 if (!ArgFlags.isSplitEnd()) { 5187 return false; 5188 } 5189 } 5190 5191 // If the split argument only had two elements, it should be passed directly 5192 // in registers or on the stack. 5193 if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 5194 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 5195 // Apply the normal calling convention rules to the first half of the 5196 // split argument. 5197 CCValAssign VA = PendingLocs[0]; 5198 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 5199 PendingLocs.clear(); 5200 PendingArgFlags.clear(); 5201 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 5202 ArgFlags); 5203 } 5204 5205 // Allocate to a register if possible, or else a stack slot. 5206 Register Reg; 5207 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 5208 Reg = State.AllocateReg(ArgFPR16s); 5209 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 5210 Reg = State.AllocateReg(ArgFPR32s); 5211 else if (ValVT == MVT::f64 && !UseGPRForF64) 5212 Reg = State.AllocateReg(ArgFPR64s); 5213 else if (ValVT.isVector()) { 5214 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); 5215 if (RC == &RISCV::VRRegClass) { 5216 // Assign the first mask argument to V0. 5217 // This is an interim calling convention and it may be changed in the 5218 // future. 5219 if (FirstMaskArgument.hasValue() && 5220 ValNo == FirstMaskArgument.getValue()) { 5221 Reg = State.AllocateReg(RISCV::V0); 5222 } else { 5223 Reg = State.AllocateReg(ArgVRs); 5224 } 5225 } else if (RC == &RISCV::VRM2RegClass) { 5226 Reg = State.AllocateReg(ArgVRM2s); 5227 } else if (RC == &RISCV::VRM4RegClass) { 5228 Reg = State.AllocateReg(ArgVRM4s); 5229 } else if (RC == &RISCV::VRM8RegClass) { 5230 Reg = State.AllocateReg(ArgVRM8s); 5231 } else { 5232 llvm_unreachable("Unhandled class register for ValueType"); 5233 } 5234 if (!Reg) { 5235 // For return values, the vector must be passed fully via registers or 5236 // via the stack. 
5237 // FIXME: The proposed vector ABI only mandates v8-v15 for return values, 5238 // but we're using all of them. 5239 if (IsRet) 5240 return true; 5241 LocInfo = CCValAssign::Indirect; 5242 // Try using a GPR to pass the address 5243 Reg = State.AllocateReg(ArgGPRs); 5244 LocVT = XLenVT; 5245 } 5246 } else 5247 Reg = State.AllocateReg(ArgGPRs); 5248 unsigned StackOffset = 5249 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); 5250 5251 // If we reach this point and PendingLocs is non-empty, we must be at the 5252 // end of a split argument that must be passed indirectly. 5253 if (!PendingLocs.empty()) { 5254 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 5255 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 5256 5257 for (auto &It : PendingLocs) { 5258 if (Reg) 5259 It.convertToReg(Reg); 5260 else 5261 It.convertToMem(StackOffset); 5262 State.addLoc(It); 5263 } 5264 PendingLocs.clear(); 5265 PendingArgFlags.clear(); 5266 return false; 5267 } 5268 5269 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || 5270 (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) && 5271 "Expected an XLenVT or vector types at this stage"); 5272 5273 if (Reg) { 5274 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5275 return false; 5276 } 5277 5278 // When a floating-point value is passed on the stack, no bit-conversion is 5279 // needed. 5280 if (ValVT.isFloatingPoint()) { 5281 LocVT = ValVT; 5282 LocInfo = CCValAssign::Full; 5283 } 5284 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 5285 return false; 5286 } 5287 5288 template <typename ArgTy> 5289 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 5290 for (const auto &ArgIdx : enumerate(Args)) { 5291 MVT ArgVT = ArgIdx.value().VT; 5292 if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) 5293 return ArgIdx.index(); 5294 } 5295 return None; 5296 } 5297 5298 void RISCVTargetLowering::analyzeInputArgs( 5299 MachineFunction &MF, CCState &CCInfo, 5300 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 5301 unsigned NumArgs = Ins.size(); 5302 FunctionType *FType = MF.getFunction().getFunctionType(); 5303 5304 Optional<unsigned> FirstMaskArgument; 5305 if (Subtarget.hasStdExtV()) 5306 FirstMaskArgument = preAssignMask(Ins); 5307 5308 for (unsigned i = 0; i != NumArgs; ++i) { 5309 MVT ArgVT = Ins[i].VT; 5310 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 5311 5312 Type *ArgTy = nullptr; 5313 if (IsRet) 5314 ArgTy = FType->getReturnType(); 5315 else if (Ins[i].isOrigArg()) 5316 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 5317 5318 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 5319 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 5320 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 5321 FirstMaskArgument)) { 5322 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 5323 << EVT(ArgVT).getEVTString() << '\n'); 5324 llvm_unreachable(nullptr); 5325 } 5326 } 5327 } 5328 5329 void RISCVTargetLowering::analyzeOutputArgs( 5330 MachineFunction &MF, CCState &CCInfo, 5331 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 5332 CallLoweringInfo *CLI) const { 5333 unsigned NumArgs = Outs.size(); 5334 5335 Optional<unsigned> FirstMaskArgument; 5336 if (Subtarget.hasStdExtV()) 5337 FirstMaskArgument = preAssignMask(Outs); 5338 5339 for (unsigned i = 0; i != NumArgs; i++) { 5340 MVT ArgVT = Outs[i].VT; 5341 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5342 
Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 5343 5344 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 5345 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 5346 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 5347 FirstMaskArgument)) { 5348 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 5349 << EVT(ArgVT).getEVTString() << "\n"); 5350 llvm_unreachable(nullptr); 5351 } 5352 } 5353 } 5354 5355 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 5356 // values. 5357 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 5358 const CCValAssign &VA, const SDLoc &DL, 5359 const RISCVSubtarget &Subtarget) { 5360 switch (VA.getLocInfo()) { 5361 default: 5362 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 5363 case CCValAssign::Full: 5364 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector()) 5365 Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget); 5366 break; 5367 case CCValAssign::BCvt: 5368 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 5369 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 5370 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 5371 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 5372 else 5373 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 5374 break; 5375 } 5376 return Val; 5377 } 5378 5379 // The caller is responsible for loading the full value if the argument is 5380 // passed with CCValAssign::Indirect. 5381 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 5382 const CCValAssign &VA, const SDLoc &DL, 5383 const RISCVTargetLowering &TLI) { 5384 MachineFunction &MF = DAG.getMachineFunction(); 5385 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 5386 EVT LocVT = VA.getLocVT(); 5387 SDValue Val; 5388 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 5389 Register VReg = RegInfo.createVirtualRegister(RC); 5390 RegInfo.addLiveIn(VA.getLocReg(), VReg); 5391 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 5392 5393 if (VA.getLocInfo() == CCValAssign::Indirect) 5394 return Val; 5395 5396 return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget()); 5397 } 5398 5399 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 5400 const CCValAssign &VA, const SDLoc &DL, 5401 const RISCVSubtarget &Subtarget) { 5402 EVT LocVT = VA.getLocVT(); 5403 5404 switch (VA.getLocInfo()) { 5405 default: 5406 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 5407 case CCValAssign::Full: 5408 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector()) 5409 Val = convertToScalableVector(LocVT, Val, DAG, Subtarget); 5410 break; 5411 case CCValAssign::BCvt: 5412 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 5413 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 5414 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 5415 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 5416 else 5417 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 5418 break; 5419 } 5420 return Val; 5421 } 5422 5423 // The caller is responsible for loading the full value if the argument is 5424 // passed with CCValAssign::Indirect. 
5425 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
5426 const CCValAssign &VA, const SDLoc &DL) {
5427 MachineFunction &MF = DAG.getMachineFunction();
5428 MachineFrameInfo &MFI = MF.getFrameInfo();
5429 EVT LocVT = VA.getLocVT();
5430 EVT ValVT = VA.getValVT();
5431 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
5432 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
5433 VA.getLocMemOffset(), /*Immutable=*/true);
5434 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
5435 SDValue Val;
5436
5437 ISD::LoadExtType ExtType;
5438 switch (VA.getLocInfo()) {
5439 default:
5440 llvm_unreachable("Unexpected CCValAssign::LocInfo");
5441 case CCValAssign::Full:
5442 case CCValAssign::Indirect:
5443 case CCValAssign::BCvt:
5444 ExtType = ISD::NON_EXTLOAD;
5445 break;
5446 }
5447 Val = DAG.getExtLoad(
5448 ExtType, DL, LocVT, Chain, FIN,
5449 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
5450 return Val;
5451 }
5452
5453 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
5454 const CCValAssign &VA, const SDLoc &DL) {
5455 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
5456 "Unexpected VA");
5457 MachineFunction &MF = DAG.getMachineFunction();
5458 MachineFrameInfo &MFI = MF.getFrameInfo();
5459 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5460
5461 if (VA.isMemLoc()) {
5462 // f64 is passed on the stack.
5463 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
5464 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
5465 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
5466 MachinePointerInfo::getFixedStack(MF, FI));
5467 }
5468
5469 assert(VA.isRegLoc() && "Expected register VA assignment");
5470
5471 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5472 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
5473 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
5474 SDValue Hi;
5475 if (VA.getLocReg() == RISCV::X17) {
5476 // Second half of f64 is passed on the stack.
5477 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
5478 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
5479 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
5480 MachinePointerInfo::getFixedStack(MF, FI));
5481 } else {
5482 // Second half of f64 is passed in another GPR.
5483 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5484 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
5485 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
5486 }
5487 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
5488 }
5489
5490 // FastCC has shown less than a 1% performance improvement for some particular
5491 // benchmarks, but in theory it may still benefit other cases.
5492 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
5493 CCValAssign::LocInfo LocInfo,
5494 ISD::ArgFlagsTy ArgFlags, CCState &State) {
5495
5496 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
5497 // X5 and X6 might be used for save-restore libcall.
5498 static const MCPhysReg GPRList[] = { 5499 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, 5500 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, 5501 RISCV::X29, RISCV::X30, RISCV::X31}; 5502 if (unsigned Reg = State.AllocateReg(GPRList)) { 5503 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5504 return false; 5505 } 5506 } 5507 5508 if (LocVT == MVT::f16) { 5509 static const MCPhysReg FPR16List[] = { 5510 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, 5511 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, 5512 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, 5513 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; 5514 if (unsigned Reg = State.AllocateReg(FPR16List)) { 5515 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5516 return false; 5517 } 5518 } 5519 5520 if (LocVT == MVT::f32) { 5521 static const MCPhysReg FPR32List[] = { 5522 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, 5523 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, 5524 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, 5525 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; 5526 if (unsigned Reg = State.AllocateReg(FPR32List)) { 5527 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5528 return false; 5529 } 5530 } 5531 5532 if (LocVT == MVT::f64) { 5533 static const MCPhysReg FPR64List[] = { 5534 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, 5535 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, 5536 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, 5537 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; 5538 if (unsigned Reg = State.AllocateReg(FPR64List)) { 5539 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5540 return false; 5541 } 5542 } 5543 5544 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 5545 unsigned Offset4 = State.AllocateStack(4, Align(4)); 5546 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 5547 return false; 5548 } 5549 5550 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 5551 unsigned Offset5 = State.AllocateStack(8, Align(8)); 5552 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 5553 return false; 5554 } 5555 5556 return true; // CC didn't match. 5557 } 5558 5559 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 5560 CCValAssign::LocInfo LocInfo, 5561 ISD::ArgFlagsTy ArgFlags, CCState &State) { 5562 5563 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 5564 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 5565 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 5566 static const MCPhysReg GPRList[] = { 5567 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 5568 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 5569 if (unsigned Reg = State.AllocateReg(GPRList)) { 5570 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5571 return false; 5572 } 5573 } 5574 5575 if (LocVT == MVT::f32) { 5576 // Pass in STG registers: F1, ..., F6 5577 // fs0 ... 
fs5 5578 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, 5579 RISCV::F18_F, RISCV::F19_F, 5580 RISCV::F20_F, RISCV::F21_F}; 5581 if (unsigned Reg = State.AllocateReg(FPR32List)) { 5582 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5583 return false; 5584 } 5585 } 5586 5587 if (LocVT == MVT::f64) { 5588 // Pass in STG registers: D1, ..., D6 5589 // fs6 ... fs11 5590 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, 5591 RISCV::F24_D, RISCV::F25_D, 5592 RISCV::F26_D, RISCV::F27_D}; 5593 if (unsigned Reg = State.AllocateReg(FPR64List)) { 5594 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5595 return false; 5596 } 5597 } 5598 5599 report_fatal_error("No registers left in GHC calling convention"); 5600 return true; 5601 } 5602 5603 // Transform physical registers into virtual registers. 5604 SDValue RISCVTargetLowering::LowerFormalArguments( 5605 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, 5606 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 5607 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 5608 5609 MachineFunction &MF = DAG.getMachineFunction(); 5610 5611 switch (CallConv) { 5612 default: 5613 report_fatal_error("Unsupported calling convention"); 5614 case CallingConv::C: 5615 case CallingConv::Fast: 5616 break; 5617 case CallingConv::GHC: 5618 if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] || 5619 !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD]) 5620 report_fatal_error( 5621 "GHC calling convention requires the F and D instruction set extensions"); 5622 } 5623 5624 const Function &Func = MF.getFunction(); 5625 if (Func.hasFnAttribute("interrupt")) { 5626 if (!Func.arg_empty()) 5627 report_fatal_error( 5628 "Functions with the interrupt attribute cannot have arguments!"); 5629 5630 StringRef Kind = 5631 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 5632 5633 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine")) 5634 report_fatal_error( 5635 "Function interrupt attribute argument not supported!"); 5636 } 5637 5638 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5639 MVT XLenVT = Subtarget.getXLenVT(); 5640 unsigned XLenInBytes = Subtarget.getXLen() / 8; 5641 // Used with vargs to acumulate store chains. 5642 std::vector<SDValue> OutChains; 5643 5644 // Assign locations to all of the incoming arguments. 5645 SmallVector<CCValAssign, 16> ArgLocs; 5646 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 5647 5648 if (CallConv == CallingConv::Fast) 5649 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC); 5650 else if (CallConv == CallingConv::GHC) 5651 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC); 5652 else 5653 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false); 5654 5655 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 5656 CCValAssign &VA = ArgLocs[i]; 5657 SDValue ArgValue; 5658 // Passing f64 on RV32D with a soft float ABI must be handled as a special 5659 // case. 5660 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) 5661 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL); 5662 else if (VA.isRegLoc()) 5663 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this); 5664 else 5665 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL); 5666 5667 if (VA.getLocInfo() == CCValAssign::Indirect) { 5668 // If the original argument was split and passed by reference (e.g. i128 5669 // on RV32), we need to load all parts of it here (using the same 5670 // address). 
Vectors may be partly split to registers and partly to the 5671 // stack, in which case the base address is partly offset and subsequent 5672 // stores are relative to that. 5673 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, 5674 MachinePointerInfo())); 5675 unsigned ArgIndex = Ins[i].OrigArgIndex; 5676 unsigned ArgPartOffset = Ins[i].PartOffset; 5677 assert(VA.getValVT().isVector() || ArgPartOffset == 0); 5678 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) { 5679 CCValAssign &PartVA = ArgLocs[i + 1]; 5680 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset; 5681 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, 5682 DAG.getIntPtrConstant(PartOffset, DL)); 5683 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address, 5684 MachinePointerInfo())); 5685 ++i; 5686 } 5687 continue; 5688 } 5689 InVals.push_back(ArgValue); 5690 } 5691 5692 if (IsVarArg) { 5693 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs); 5694 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs); 5695 const TargetRegisterClass *RC = &RISCV::GPRRegClass; 5696 MachineFrameInfo &MFI = MF.getFrameInfo(); 5697 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 5698 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>(); 5699 5700 // Offset of the first variable argument from stack pointer, and size of 5701 // the vararg save area. For now, the varargs save area is either zero or 5702 // large enough to hold a0-a7. 5703 int VaArgOffset, VarArgsSaveSize; 5704 5705 // If all registers are allocated, then all varargs must be passed on the 5706 // stack and we don't need to save any argregs. 5707 if (ArgRegs.size() == Idx) { 5708 VaArgOffset = CCInfo.getNextStackOffset(); 5709 VarArgsSaveSize = 0; 5710 } else { 5711 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx); 5712 VaArgOffset = -VarArgsSaveSize; 5713 } 5714 5715 // Record the frame index of the first variable argument 5716 // which is a value necessary to VASTART. 5717 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); 5718 RVFI->setVarArgsFrameIndex(FI); 5719 5720 // If saving an odd number of registers then create an extra stack slot to 5721 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures 5722 // offsets to even-numbered registered remain 2*XLEN-aligned. 5723 if (Idx % 2) { 5724 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true); 5725 VarArgsSaveSize += XLenInBytes; 5726 } 5727 5728 // Copy the integer registers that may have been used for passing varargs 5729 // to the vararg save area. 5730 for (unsigned I = Idx; I < ArgRegs.size(); 5731 ++I, VaArgOffset += XLenInBytes) { 5732 const Register Reg = RegInfo.createVirtualRegister(RC); 5733 RegInfo.addLiveIn(ArgRegs[I], Reg); 5734 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT); 5735 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true); 5736 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 5737 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, 5738 MachinePointerInfo::getFixedStack(MF, FI)); 5739 cast<StoreSDNode>(Store.getNode()) 5740 ->getMemOperand() 5741 ->setValue((Value *)nullptr); 5742 OutChains.push_back(Store); 5743 } 5744 RVFI->setVarArgsSaveSize(VarArgsSaveSize); 5745 } 5746 5747 // All stores are grouped in one node to allow the matching between 5748 // the size of Ins and InVals. This only happens for vararg functions. 
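// A sketch of the resulting chain, assuming two argument registers were
// saved above:
//   Chain = TokenFactor(store of a6, store of a7, original Chain)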
5749 if (!OutChains.empty()) { 5750 OutChains.push_back(Chain); 5751 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); 5752 } 5753 5754 return Chain; 5755 } 5756 5757 /// isEligibleForTailCallOptimization - Check whether the call is eligible 5758 /// for tail call optimization. 5759 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. 5760 bool RISCVTargetLowering::isEligibleForTailCallOptimization( 5761 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, 5762 const SmallVector<CCValAssign, 16> &ArgLocs) const { 5763 5764 auto &Callee = CLI.Callee; 5765 auto CalleeCC = CLI.CallConv; 5766 auto &Outs = CLI.Outs; 5767 auto &Caller = MF.getFunction(); 5768 auto CallerCC = Caller.getCallingConv(); 5769 5770 // Exception-handling functions need a special set of instructions to 5771 // indicate a return to the hardware. Tail-calling another function would 5772 // probably break this. 5773 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This 5774 // should be expanded as new function attributes are introduced. 5775 if (Caller.hasFnAttribute("interrupt")) 5776 return false; 5777 5778 // Do not tail call opt if the stack is used to pass parameters. 5779 if (CCInfo.getNextStackOffset() != 0) 5780 return false; 5781 5782 // Do not tail call opt if any parameters need to be passed indirectly. 5783 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 5784 // passed indirectly. So the address of the value will be passed in a 5785 // register, or if not available, then the address is put on the stack. In 5786 // order to pass indirectly, space on the stack often needs to be allocated 5787 // in order to store the value. In this case the CCInfo.getNextStackOffset() 5788 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 5789 // are passed CCValAssign::Indirect. 5790 for (auto &VA : ArgLocs) 5791 if (VA.getLocInfo() == CCValAssign::Indirect) 5792 return false; 5793 5794 // Do not tail call opt if either caller or callee uses struct return 5795 // semantics. 5796 auto IsCallerStructRet = Caller.hasStructRetAttr(); 5797 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 5798 if (IsCallerStructRet || IsCalleeStructRet) 5799 return false; 5800 5801 // Externally-defined functions with weak linkage should not be 5802 // tail-called. The behaviour of branch instructions in this situation (as 5803 // used for tail calls) is implementation-defined, so we cannot rely on the 5804 // linker replacing the tail call with a return. 5805 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 5806 const GlobalValue *GV = G->getGlobal(); 5807 if (GV->hasExternalWeakLinkage()) 5808 return false; 5809 } 5810 5811 // The callee has to preserve all registers the caller needs to preserve. 5812 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5813 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 5814 if (CalleeCC != CallerCC) { 5815 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 5816 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 5817 return false; 5818 } 5819 5820 // Byval parameters hand the function a pointer directly into the stack area 5821 // we want to reuse during a tail call. Working around this *is* possible 5822 // but less efficient and uglier in LowerCall. 
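// For example, a byval struct argument hands the callee a pointer into the
// caller's outgoing argument area; a tail call would reuse (and so clobber)
// that area before the callee could safely read from it.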
5823 for (auto &Arg : Outs) 5824 if (Arg.Flags.isByVal()) 5825 return false; 5826 5827 return true; 5828 } 5829 5830 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 5831 // and output parameter nodes. 5832 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 5833 SmallVectorImpl<SDValue> &InVals) const { 5834 SelectionDAG &DAG = CLI.DAG; 5835 SDLoc &DL = CLI.DL; 5836 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5837 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5838 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5839 SDValue Chain = CLI.Chain; 5840 SDValue Callee = CLI.Callee; 5841 bool &IsTailCall = CLI.IsTailCall; 5842 CallingConv::ID CallConv = CLI.CallConv; 5843 bool IsVarArg = CLI.IsVarArg; 5844 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5845 MVT XLenVT = Subtarget.getXLenVT(); 5846 5847 MachineFunction &MF = DAG.getMachineFunction(); 5848 5849 // Analyze the operands of the call, assigning locations to each operand. 5850 SmallVector<CCValAssign, 16> ArgLocs; 5851 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 5852 5853 if (CallConv == CallingConv::Fast) 5854 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); 5855 else if (CallConv == CallingConv::GHC) 5856 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 5857 else 5858 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 5859 5860 // Check if it's really possible to do a tail call. 5861 if (IsTailCall) 5862 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 5863 5864 if (IsTailCall) 5865 ++NumTailCalls; 5866 else if (CLI.CB && CLI.CB->isMustTailCall()) 5867 report_fatal_error("failed to perform tail call elimination on a call " 5868 "site marked musttail"); 5869 5870 // Get a count of how many bytes are to be pushed on the stack. 5871 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 5872 5873 // Create local copies for byval args 5874 SmallVector<SDValue, 8> ByValArgs; 5875 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 5876 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5877 if (!Flags.isByVal()) 5878 continue; 5879 5880 SDValue Arg = OutVals[i]; 5881 unsigned Size = Flags.getByValSize(); 5882 Align Alignment = Flags.getNonZeroByValAlign(); 5883 5884 int FI = 5885 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 5886 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 5887 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 5888 5889 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 5890 /*IsVolatile=*/false, 5891 /*AlwaysInline=*/false, IsTailCall, 5892 MachinePointerInfo(), MachinePointerInfo()); 5893 ByValArgs.push_back(FIPtr); 5894 } 5895 5896 if (!IsTailCall) 5897 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 5898 5899 // Copy argument values to their designated locations. 5900 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 5901 SmallVector<SDValue, 8> MemOpChains; 5902 SDValue StackPtr; 5903 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 5904 CCValAssign &VA = ArgLocs[i]; 5905 SDValue ArgValue = OutVals[i]; 5906 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5907 5908 // Handle passing f64 on RV32D with a soft float ABI as a special case. 
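// For illustration (assuming RV32 with the D extension but the soft-float
// ilp32 ABI): a call such as
//
//   void callee(double d);   // hypothetical prototype
//   callee(1.0);
//
// produces an f64 operand that must nevertheless be handed over in a GPR
// pair (e.g. a0/a1) per the ABI; that is the SplitF64 case below, with the
// high half going to the stack when only a7 remains free.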
5909 bool IsF64OnRV32DSoftABI = 5910 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 5911 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 5912 SDValue SplitF64 = DAG.getNode( 5913 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 5914 SDValue Lo = SplitF64.getValue(0); 5915 SDValue Hi = SplitF64.getValue(1); 5916 5917 Register RegLo = VA.getLocReg(); 5918 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 5919 5920 if (RegLo == RISCV::X17) { 5921 // Second half of f64 is passed on the stack. 5922 // Work out the address of the stack slot. 5923 if (!StackPtr.getNode()) 5924 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 5925 // Emit the store. 5926 MemOpChains.push_back( 5927 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 5928 } else { 5929 // Second half of f64 is passed in another GPR. 5930 assert(RegLo < RISCV::X31 && "Invalid register pair"); 5931 Register RegHigh = RegLo + 1; 5932 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 5933 } 5934 continue; 5935 } 5936 5937 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 5938 // as any other MemLoc. 5939 5940 // Promote the value if needed. 5941 // For now, only handle fully promoted and indirect arguments. 5942 if (VA.getLocInfo() == CCValAssign::Indirect) { 5943 // Store the argument in a stack slot and pass its address. 5944 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 5945 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 5946 MemOpChains.push_back( 5947 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 5948 MachinePointerInfo::getFixedStack(MF, FI))); 5949 // If the original argument was split (e.g. i128), we need 5950 // to store the required parts of it here (and pass just one address). 5951 // Vectors may be partly split to registers and partly to the stack, in 5952 // which case the base address is partly offset and subsequent stores are 5953 // relative to that. 5954 unsigned ArgIndex = Outs[i].OrigArgIndex; 5955 unsigned ArgPartOffset = Outs[i].PartOffset; 5956 assert(VA.getValVT().isVector() || ArgPartOffset == 0); 5957 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 5958 SDValue PartValue = OutVals[i + 1]; 5959 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset; 5960 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 5961 DAG.getIntPtrConstant(PartOffset, DL)); 5962 MemOpChains.push_back( 5963 DAG.getStore(Chain, DL, PartValue, Address, 5964 MachinePointerInfo::getFixedStack(MF, FI))); 5965 ++i; 5966 } 5967 ArgValue = SpillSlot; 5968 } else { 5969 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget); 5970 } 5971 5972 // Use local copy if it is a byval arg. 5973 if (Flags.isByVal()) 5974 ArgValue = ByValArgs[j++]; 5975 5976 if (VA.isRegLoc()) { 5977 // Queue up the argument copies and emit them at the end. 5978 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 5979 } else { 5980 assert(VA.isMemLoc() && "Argument not register or memory"); 5981 assert(!IsTailCall && "Tail call not allowed if stack is used " 5982 "for passing parameters"); 5983 5984 // Work out the address of the stack slot. 5985 if (!StackPtr.getNode()) 5986 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 5987 SDValue Address = 5988 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 5989 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 5990 5991 // Emit the store. 
5992 MemOpChains.push_back( 5993 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 5994 } 5995 } 5996 5997 // Join the stores, which are independent of one another. 5998 if (!MemOpChains.empty()) 5999 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 6000 6001 SDValue Glue; 6002 6003 // Build a sequence of copy-to-reg nodes, chained and glued together. 6004 for (auto &Reg : RegsToPass) { 6005 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 6006 Glue = Chain.getValue(1); 6007 } 6008 6009 // Validate that none of the argument registers have been marked as 6010 // reserved, if so report an error. Do the same for the return address if this 6011 // is not a tailcall. 6012 validateCCReservedRegs(RegsToPass, MF); 6013 if (!IsTailCall && 6014 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 6015 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 6016 MF.getFunction(), 6017 "Return address register required, but has been reserved."}); 6018 6019 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 6020 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 6021 // split it and then direct call can be matched by PseudoCALL. 6022 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 6023 const GlobalValue *GV = S->getGlobal(); 6024 6025 unsigned OpFlags = RISCVII::MO_CALL; 6026 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 6027 OpFlags = RISCVII::MO_PLT; 6028 6029 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 6030 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 6031 unsigned OpFlags = RISCVII::MO_CALL; 6032 6033 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 6034 nullptr)) 6035 OpFlags = RISCVII::MO_PLT; 6036 6037 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 6038 } 6039 6040 // The first call operand is the chain and the second is the target address. 6041 SmallVector<SDValue, 8> Ops; 6042 Ops.push_back(Chain); 6043 Ops.push_back(Callee); 6044 6045 // Add argument registers to the end of the list so that they are 6046 // known live into the call. 6047 for (auto &Reg : RegsToPass) 6048 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 6049 6050 if (!IsTailCall) { 6051 // Add a register mask operand representing the call-preserved registers. 6052 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6053 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 6054 assert(Mask && "Missing call preserved mask for calling convention"); 6055 Ops.push_back(DAG.getRegisterMask(Mask)); 6056 } 6057 6058 // Glue the call to the argument copies, if any. 6059 if (Glue.getNode()) 6060 Ops.push_back(Glue); 6061 6062 // Emit the call. 6063 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6064 6065 if (IsTailCall) { 6066 MF.getFrameInfo().setHasTailCall(); 6067 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 6068 } 6069 6070 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 6071 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 6072 Glue = Chain.getValue(1); 6073 6074 // Mark the end of the call, which is glued to the call itself. 6075 Chain = DAG.getCALLSEQ_END(Chain, 6076 DAG.getConstant(NumBytes, DL, PtrVT, true), 6077 DAG.getConstant(0, DL, PtrVT, true), 6078 Glue, DL); 6079 Glue = Chain.getValue(1); 6080 6081 // Assign locations to each value returned by this call. 
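// For illustration (assuming the ilp32 soft-float ABI): a call returning
// double, e.g.
//
//   double f(void);   // hypothetical prototype
//   double x = f();
//
// delivers the result split across a0 (low half) and a1 (high half); the
// loop below emits glued CopyFromReg nodes and reassembles the value with
// RISCVISD::BuildPairF64.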
6082 SmallVector<CCValAssign, 16> RVLocs; 6083 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 6084 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 6085 6086 // Copy all of the result registers out of their specified physreg. 6087 for (auto &VA : RVLocs) { 6088 // Copy the value out 6089 SDValue RetValue = 6090 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 6091 // Glue the RetValue to the end of the call sequence 6092 Chain = RetValue.getValue(1); 6093 Glue = RetValue.getValue(2); 6094 6095 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 6096 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 6097 SDValue RetValue2 = 6098 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 6099 Chain = RetValue2.getValue(1); 6100 Glue = RetValue2.getValue(2); 6101 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 6102 RetValue2); 6103 } 6104 6105 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget); 6106 6107 InVals.push_back(RetValue); 6108 } 6109 6110 return Chain; 6111 } 6112 6113 bool RISCVTargetLowering::CanLowerReturn( 6114 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 6115 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 6116 SmallVector<CCValAssign, 16> RVLocs; 6117 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 6118 6119 Optional<unsigned> FirstMaskArgument; 6120 if (Subtarget.hasStdExtV()) 6121 FirstMaskArgument = preAssignMask(Outs); 6122 6123 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 6124 MVT VT = Outs[i].VT; 6125 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 6126 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 6127 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 6128 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 6129 *this, FirstMaskArgument)) 6130 return false; 6131 } 6132 return true; 6133 } 6134 6135 SDValue 6136 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6137 bool IsVarArg, 6138 const SmallVectorImpl<ISD::OutputArg> &Outs, 6139 const SmallVectorImpl<SDValue> &OutVals, 6140 const SDLoc &DL, SelectionDAG &DAG) const { 6141 const MachineFunction &MF = DAG.getMachineFunction(); 6142 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 6143 6144 // Stores the assignment of the return value to a location. 6145 SmallVector<CCValAssign, 16> RVLocs; 6146 6147 // Info about the registers and stack slot. 6148 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 6149 *DAG.getContext()); 6150 6151 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 6152 nullptr); 6153 6154 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 6155 report_fatal_error("GHC functions return void only"); 6156 6157 SDValue Glue; 6158 SmallVector<SDValue, 4> RetOps(1, Chain); 6159 6160 // Copy the result values into the output registers. 6161 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 6162 SDValue Val = OutVals[i]; 6163 CCValAssign &VA = RVLocs[i]; 6164 assert(VA.isRegLoc() && "Can only return in registers!"); 6165 6166 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 6167 // Handle returning f64 on RV32D with a soft float ABI. 
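// For illustration (the callee-side mirror of the caller logic above,
// assuming the ilp32 soft-float ABI): returning a double, e.g.
//
//   double g(void) { return 1.5; }   // hypothetical example
//
// splits the f64 with RISCVISD::SplitF64 and copies the two halves into the
// a0/a1 register pair below.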
6168 assert(VA.isRegLoc() && "Expected return via registers"); 6169 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 6170 DAG.getVTList(MVT::i32, MVT::i32), Val); 6171 SDValue Lo = SplitF64.getValue(0); 6172 SDValue Hi = SplitF64.getValue(1); 6173 Register RegLo = VA.getLocReg(); 6174 assert(RegLo < RISCV::X31 && "Invalid register pair"); 6175 Register RegHi = RegLo + 1; 6176 6177 if (STI.isRegisterReservedByUser(RegLo) || 6178 STI.isRegisterReservedByUser(RegHi)) 6179 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 6180 MF.getFunction(), 6181 "Return value register required, but has been reserved."}); 6182 6183 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 6184 Glue = Chain.getValue(1); 6185 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 6186 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 6187 Glue = Chain.getValue(1); 6188 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 6189 } else { 6190 // Handle a 'normal' return. 6191 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget); 6192 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 6193 6194 if (STI.isRegisterReservedByUser(VA.getLocReg())) 6195 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 6196 MF.getFunction(), 6197 "Return value register required, but has been reserved."}); 6198 6199 // Guarantee that all emitted copies are stuck together. 6200 Glue = Chain.getValue(1); 6201 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 6202 } 6203 } 6204 6205 RetOps[0] = Chain; // Update chain. 6206 6207 // Add the glue node if we have it. 6208 if (Glue.getNode()) { 6209 RetOps.push_back(Glue); 6210 } 6211 6212 // Interrupt service routines use different return instructions. 6213 const Function &Func = DAG.getMachineFunction().getFunction(); 6214 if (Func.hasFnAttribute("interrupt")) { 6215 if (!Func.getReturnType()->isVoidTy()) 6216 report_fatal_error( 6217 "Functions with the interrupt attribute must have void return type!"); 6218 6219 MachineFunction &MF = DAG.getMachineFunction(); 6220 StringRef Kind = 6221 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 6222 6223 unsigned RetOpc; 6224 if (Kind == "user") 6225 RetOpc = RISCVISD::URET_FLAG; 6226 else if (Kind == "supervisor") 6227 RetOpc = RISCVISD::SRET_FLAG; 6228 else 6229 RetOpc = RISCVISD::MRET_FLAG; 6230 6231 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 6232 } 6233 6234 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 6235 } 6236 6237 void RISCVTargetLowering::validateCCReservedRegs( 6238 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 6239 MachineFunction &MF) const { 6240 const Function &F = MF.getFunction(); 6241 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 6242 6243 if (llvm::any_of(Regs, [&STI](auto Reg) { 6244 return STI.isRegisterReservedByUser(Reg.first); 6245 })) 6246 F.getContext().diagnose(DiagnosticInfoUnsupported{ 6247 F, "Argument register required, but has been reserved."}); 6248 } 6249 6250 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 6251 return CI->isTailCall(); 6252 } 6253 6254 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 6255 #define NODE_NAME_CASE(NODE) \ 6256 case RISCVISD::NODE: \ 6257 return "RISCVISD::" #NODE; 6258 // clang-format off 6259 switch ((RISCVISD::NodeType)Opcode) { 6260 case RISCVISD::FIRST_NUMBER: 6261 break; 6262 NODE_NAME_CASE(RET_FLAG) 6263 NODE_NAME_CASE(URET_FLAG) 6264 NODE_NAME_CASE(SRET_FLAG) 6265 
NODE_NAME_CASE(MRET_FLAG) 6266 NODE_NAME_CASE(CALL) 6267 NODE_NAME_CASE(SELECT_CC) 6268 NODE_NAME_CASE(BR_CC) 6269 NODE_NAME_CASE(BuildPairF64) 6270 NODE_NAME_CASE(SplitF64) 6271 NODE_NAME_CASE(TAIL) 6272 NODE_NAME_CASE(SLLW) 6273 NODE_NAME_CASE(SRAW) 6274 NODE_NAME_CASE(SRLW) 6275 NODE_NAME_CASE(DIVW) 6276 NODE_NAME_CASE(DIVUW) 6277 NODE_NAME_CASE(REMUW) 6278 NODE_NAME_CASE(ROLW) 6279 NODE_NAME_CASE(RORW) 6280 NODE_NAME_CASE(FSLW) 6281 NODE_NAME_CASE(FSRW) 6282 NODE_NAME_CASE(FSL) 6283 NODE_NAME_CASE(FSR) 6284 NODE_NAME_CASE(FMV_H_X) 6285 NODE_NAME_CASE(FMV_X_ANYEXTH) 6286 NODE_NAME_CASE(FMV_W_X_RV64) 6287 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 6288 NODE_NAME_CASE(READ_CYCLE_WIDE) 6289 NODE_NAME_CASE(GREVI) 6290 NODE_NAME_CASE(GREVIW) 6291 NODE_NAME_CASE(GORCI) 6292 NODE_NAME_CASE(GORCIW) 6293 NODE_NAME_CASE(SHFLI) 6294 NODE_NAME_CASE(VMV_V_X_VL) 6295 NODE_NAME_CASE(VFMV_V_F_VL) 6296 NODE_NAME_CASE(VMV_X_S) 6297 NODE_NAME_CASE(VMV_S_XF_VL) 6298 NODE_NAME_CASE(SPLAT_VECTOR_I64) 6299 NODE_NAME_CASE(READ_VLENB) 6300 NODE_NAME_CASE(TRUNCATE_VECTOR_VL) 6301 NODE_NAME_CASE(VLEFF) 6302 NODE_NAME_CASE(VLEFF_MASK) 6303 NODE_NAME_CASE(VSLIDEUP_VL) 6304 NODE_NAME_CASE(VSLIDE1UP_VL) 6305 NODE_NAME_CASE(VSLIDEDOWN_VL) 6306 NODE_NAME_CASE(VID_VL) 6307 NODE_NAME_CASE(VFNCVT_ROD_VL) 6308 NODE_NAME_CASE(VECREDUCE_ADD_VL) 6309 NODE_NAME_CASE(VECREDUCE_UMAX_VL) 6310 NODE_NAME_CASE(VECREDUCE_SMAX_VL) 6311 NODE_NAME_CASE(VECREDUCE_UMIN_VL) 6312 NODE_NAME_CASE(VECREDUCE_SMIN_VL) 6313 NODE_NAME_CASE(VECREDUCE_AND_VL) 6314 NODE_NAME_CASE(VECREDUCE_OR_VL) 6315 NODE_NAME_CASE(VECREDUCE_XOR_VL) 6316 NODE_NAME_CASE(VECREDUCE_FADD_VL) 6317 NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL) 6318 NODE_NAME_CASE(ADD_VL) 6319 NODE_NAME_CASE(AND_VL) 6320 NODE_NAME_CASE(MUL_VL) 6321 NODE_NAME_CASE(OR_VL) 6322 NODE_NAME_CASE(SDIV_VL) 6323 NODE_NAME_CASE(SHL_VL) 6324 NODE_NAME_CASE(SREM_VL) 6325 NODE_NAME_CASE(SRA_VL) 6326 NODE_NAME_CASE(SRL_VL) 6327 NODE_NAME_CASE(SUB_VL) 6328 NODE_NAME_CASE(UDIV_VL) 6329 NODE_NAME_CASE(UREM_VL) 6330 NODE_NAME_CASE(XOR_VL) 6331 NODE_NAME_CASE(FADD_VL) 6332 NODE_NAME_CASE(FSUB_VL) 6333 NODE_NAME_CASE(FMUL_VL) 6334 NODE_NAME_CASE(FDIV_VL) 6335 NODE_NAME_CASE(FNEG_VL) 6336 NODE_NAME_CASE(FABS_VL) 6337 NODE_NAME_CASE(FSQRT_VL) 6338 NODE_NAME_CASE(FMA_VL) 6339 NODE_NAME_CASE(FCOPYSIGN_VL) 6340 NODE_NAME_CASE(SMIN_VL) 6341 NODE_NAME_CASE(SMAX_VL) 6342 NODE_NAME_CASE(UMIN_VL) 6343 NODE_NAME_CASE(UMAX_VL) 6344 NODE_NAME_CASE(MULHS_VL) 6345 NODE_NAME_CASE(MULHU_VL) 6346 NODE_NAME_CASE(FP_TO_SINT_VL) 6347 NODE_NAME_CASE(FP_TO_UINT_VL) 6348 NODE_NAME_CASE(SINT_TO_FP_VL) 6349 NODE_NAME_CASE(UINT_TO_FP_VL) 6350 NODE_NAME_CASE(FP_EXTEND_VL) 6351 NODE_NAME_CASE(FP_ROUND_VL) 6352 NODE_NAME_CASE(SETCC_VL) 6353 NODE_NAME_CASE(VSELECT_VL) 6354 NODE_NAME_CASE(VMAND_VL) 6355 NODE_NAME_CASE(VMOR_VL) 6356 NODE_NAME_CASE(VMXOR_VL) 6357 NODE_NAME_CASE(VMCLR_VL) 6358 NODE_NAME_CASE(VMSET_VL) 6359 NODE_NAME_CASE(VRGATHER_VX_VL) 6360 NODE_NAME_CASE(VRGATHER_VV_VL) 6361 NODE_NAME_CASE(VRGATHEREI16_VV_VL) 6362 NODE_NAME_CASE(VSEXT_VL) 6363 NODE_NAME_CASE(VZEXT_VL) 6364 NODE_NAME_CASE(VLE_VL) 6365 NODE_NAME_CASE(VSE_VL) 6366 } 6367 // clang-format on 6368 return nullptr; 6369 #undef NODE_NAME_CASE 6370 } 6371 6372 /// getConstraintType - Given a constraint letter, return the type of 6373 /// constraint it is for this target. 
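/// For example (illustrative, hypothetical inline asm):
///   asm volatile("addi %0, %1, %2" : "=r"(res) : "r"(src), "I"(16));
/// Here 'r' is resolved by the generic handling, 'I'/'J'/'K' map to
/// C_Immediate, 'f' and 'v' map to C_RegisterClass, and 'A' maps to C_Memory.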
6374 RISCVTargetLowering::ConstraintType 6375 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 6376 if (Constraint.size() == 1) { 6377 switch (Constraint[0]) { 6378 default: 6379 break; 6380 case 'f': 6381 case 'v': 6382 return C_RegisterClass; 6383 case 'I': 6384 case 'J': 6385 case 'K': 6386 return C_Immediate; 6387 case 'A': 6388 return C_Memory; 6389 } 6390 } 6391 return TargetLowering::getConstraintType(Constraint); 6392 } 6393 6394 std::pair<unsigned, const TargetRegisterClass *> 6395 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 6396 StringRef Constraint, 6397 MVT VT) const { 6398 // First, see if this is a constraint that directly corresponds to a 6399 // RISCV register class. 6400 if (Constraint.size() == 1) { 6401 switch (Constraint[0]) { 6402 case 'r': 6403 return std::make_pair(0U, &RISCV::GPRRegClass); 6404 case 'f': 6405 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) 6406 return std::make_pair(0U, &RISCV::FPR16RegClass); 6407 if (Subtarget.hasStdExtF() && VT == MVT::f32) 6408 return std::make_pair(0U, &RISCV::FPR32RegClass); 6409 if (Subtarget.hasStdExtD() && VT == MVT::f64) 6410 return std::make_pair(0U, &RISCV::FPR64RegClass); 6411 break; 6412 case 'v': 6413 for (const auto *RC : 6414 {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass, 6415 &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 6416 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) 6417 return std::make_pair(0U, RC); 6418 } 6419 break; 6420 default: 6421 break; 6422 } 6423 } 6424 6425 // Clang will correctly decode the usage of register name aliases into their 6426 // official names. However, other frontends like `rustc` do not. This allows 6427 // users of these frontends to use the ABI names for registers in LLVM-style 6428 // register constraints. 6429 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) 6430 .Case("{zero}", RISCV::X0) 6431 .Case("{ra}", RISCV::X1) 6432 .Case("{sp}", RISCV::X2) 6433 .Case("{gp}", RISCV::X3) 6434 .Case("{tp}", RISCV::X4) 6435 .Case("{t0}", RISCV::X5) 6436 .Case("{t1}", RISCV::X6) 6437 .Case("{t2}", RISCV::X7) 6438 .Cases("{s0}", "{fp}", RISCV::X8) 6439 .Case("{s1}", RISCV::X9) 6440 .Case("{a0}", RISCV::X10) 6441 .Case("{a1}", RISCV::X11) 6442 .Case("{a2}", RISCV::X12) 6443 .Case("{a3}", RISCV::X13) 6444 .Case("{a4}", RISCV::X14) 6445 .Case("{a5}", RISCV::X15) 6446 .Case("{a6}", RISCV::X16) 6447 .Case("{a7}", RISCV::X17) 6448 .Case("{s2}", RISCV::X18) 6449 .Case("{s3}", RISCV::X19) 6450 .Case("{s4}", RISCV::X20) 6451 .Case("{s5}", RISCV::X21) 6452 .Case("{s6}", RISCV::X22) 6453 .Case("{s7}", RISCV::X23) 6454 .Case("{s8}", RISCV::X24) 6455 .Case("{s9}", RISCV::X25) 6456 .Case("{s10}", RISCV::X26) 6457 .Case("{s11}", RISCV::X27) 6458 .Case("{t3}", RISCV::X28) 6459 .Case("{t4}", RISCV::X29) 6460 .Case("{t5}", RISCV::X30) 6461 .Case("{t6}", RISCV::X31) 6462 .Default(RISCV::NoRegister); 6463 if (XRegFromAlias != RISCV::NoRegister) 6464 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 6465 6466 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 6467 // TableGen record rather than the AsmName to choose registers for InlineAsm 6468 // constraints, plus we want to match those names to the widest floating point 6469 // register type available, manually select floating point registers here. 6470 // 6471 // The second case is the ABI name of the register, so that frontends can also 6472 // use the ABI names in register constraint lists. 
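// For illustration (a hypothetical IR-level constraint string): in
//
//   call float asm "fadd.s $0, $1, $2",
//                  "={fa0},{fa1},{fa2}"(float %a, float %b)
//
// both "{fa1}" and "{f11}" resolve to F11_F here, and are widened to the
// FPR64 register class below when the D extension is available.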
6473 if (Subtarget.hasStdExtF()) { 6474 unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) 6475 .Cases("{f0}", "{ft0}", RISCV::F0_F) 6476 .Cases("{f1}", "{ft1}", RISCV::F1_F) 6477 .Cases("{f2}", "{ft2}", RISCV::F2_F) 6478 .Cases("{f3}", "{ft3}", RISCV::F3_F) 6479 .Cases("{f4}", "{ft4}", RISCV::F4_F) 6480 .Cases("{f5}", "{ft5}", RISCV::F5_F) 6481 .Cases("{f6}", "{ft6}", RISCV::F6_F) 6482 .Cases("{f7}", "{ft7}", RISCV::F7_F) 6483 .Cases("{f8}", "{fs0}", RISCV::F8_F) 6484 .Cases("{f9}", "{fs1}", RISCV::F9_F) 6485 .Cases("{f10}", "{fa0}", RISCV::F10_F) 6486 .Cases("{f11}", "{fa1}", RISCV::F11_F) 6487 .Cases("{f12}", "{fa2}", RISCV::F12_F) 6488 .Cases("{f13}", "{fa3}", RISCV::F13_F) 6489 .Cases("{f14}", "{fa4}", RISCV::F14_F) 6490 .Cases("{f15}", "{fa5}", RISCV::F15_F) 6491 .Cases("{f16}", "{fa6}", RISCV::F16_F) 6492 .Cases("{f17}", "{fa7}", RISCV::F17_F) 6493 .Cases("{f18}", "{fs2}", RISCV::F18_F) 6494 .Cases("{f19}", "{fs3}", RISCV::F19_F) 6495 .Cases("{f20}", "{fs4}", RISCV::F20_F) 6496 .Cases("{f21}", "{fs5}", RISCV::F21_F) 6497 .Cases("{f22}", "{fs6}", RISCV::F22_F) 6498 .Cases("{f23}", "{fs7}", RISCV::F23_F) 6499 .Cases("{f24}", "{fs8}", RISCV::F24_F) 6500 .Cases("{f25}", "{fs9}", RISCV::F25_F) 6501 .Cases("{f26}", "{fs10}", RISCV::F26_F) 6502 .Cases("{f27}", "{fs11}", RISCV::F27_F) 6503 .Cases("{f28}", "{ft8}", RISCV::F28_F) 6504 .Cases("{f29}", "{ft9}", RISCV::F29_F) 6505 .Cases("{f30}", "{ft10}", RISCV::F30_F) 6506 .Cases("{f31}", "{ft11}", RISCV::F31_F) 6507 .Default(RISCV::NoRegister); 6508 if (FReg != RISCV::NoRegister) { 6509 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); 6510 if (Subtarget.hasStdExtD()) { 6511 unsigned RegNo = FReg - RISCV::F0_F; 6512 unsigned DReg = RISCV::F0_D + RegNo; 6513 return std::make_pair(DReg, &RISCV::FPR64RegClass); 6514 } 6515 return std::make_pair(FReg, &RISCV::FPR32RegClass); 6516 } 6517 } 6518 6519 if (Subtarget.hasStdExtV()) { 6520 Register VReg = StringSwitch<Register>(Constraint.lower()) 6521 .Case("{v0}", RISCV::V0) 6522 .Case("{v1}", RISCV::V1) 6523 .Case("{v2}", RISCV::V2) 6524 .Case("{v3}", RISCV::V3) 6525 .Case("{v4}", RISCV::V4) 6526 .Case("{v5}", RISCV::V5) 6527 .Case("{v6}", RISCV::V6) 6528 .Case("{v7}", RISCV::V7) 6529 .Case("{v8}", RISCV::V8) 6530 .Case("{v9}", RISCV::V9) 6531 .Case("{v10}", RISCV::V10) 6532 .Case("{v11}", RISCV::V11) 6533 .Case("{v12}", RISCV::V12) 6534 .Case("{v13}", RISCV::V13) 6535 .Case("{v14}", RISCV::V14) 6536 .Case("{v15}", RISCV::V15) 6537 .Case("{v16}", RISCV::V16) 6538 .Case("{v17}", RISCV::V17) 6539 .Case("{v18}", RISCV::V18) 6540 .Case("{v19}", RISCV::V19) 6541 .Case("{v20}", RISCV::V20) 6542 .Case("{v21}", RISCV::V21) 6543 .Case("{v22}", RISCV::V22) 6544 .Case("{v23}", RISCV::V23) 6545 .Case("{v24}", RISCV::V24) 6546 .Case("{v25}", RISCV::V25) 6547 .Case("{v26}", RISCV::V26) 6548 .Case("{v27}", RISCV::V27) 6549 .Case("{v28}", RISCV::V28) 6550 .Case("{v29}", RISCV::V29) 6551 .Case("{v30}", RISCV::V30) 6552 .Case("{v31}", RISCV::V31) 6553 .Default(RISCV::NoRegister); 6554 if (VReg != RISCV::NoRegister) { 6555 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy)) 6556 return std::make_pair(VReg, &RISCV::VMRegClass); 6557 if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy)) 6558 return std::make_pair(VReg, &RISCV::VRRegClass); 6559 for (const auto *RC : 6560 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 6561 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) { 6562 VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC); 6563 return 
std::make_pair(VReg, RC); 6564 } 6565 } 6566 } 6567 } 6568 6569 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 6570 } 6571 6572 unsigned 6573 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 6574 // Currently only support length 1 constraints. 6575 if (ConstraintCode.size() == 1) { 6576 switch (ConstraintCode[0]) { 6577 case 'A': 6578 return InlineAsm::Constraint_A; 6579 default: 6580 break; 6581 } 6582 } 6583 6584 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 6585 } 6586 6587 void RISCVTargetLowering::LowerAsmOperandForConstraint( 6588 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 6589 SelectionDAG &DAG) const { 6590 // Currently only support length 1 constraints. 6591 if (Constraint.length() == 1) { 6592 switch (Constraint[0]) { 6593 case 'I': 6594 // Validate & create a 12-bit signed immediate operand. 6595 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 6596 uint64_t CVal = C->getSExtValue(); 6597 if (isInt<12>(CVal)) 6598 Ops.push_back( 6599 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 6600 } 6601 return; 6602 case 'J': 6603 // Validate & create an integer zero operand. 6604 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 6605 if (C->getZExtValue() == 0) 6606 Ops.push_back( 6607 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 6608 return; 6609 case 'K': 6610 // Validate & create a 5-bit unsigned immediate operand. 6611 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 6612 uint64_t CVal = C->getZExtValue(); 6613 if (isUInt<5>(CVal)) 6614 Ops.push_back( 6615 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 6616 } 6617 return; 6618 default: 6619 break; 6620 } 6621 } 6622 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 6623 } 6624 6625 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder, 6626 Instruction *Inst, 6627 AtomicOrdering Ord) const { 6628 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 6629 return Builder.CreateFence(Ord); 6630 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 6631 return Builder.CreateFence(AtomicOrdering::Release); 6632 return nullptr; 6633 } 6634 6635 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder, 6636 Instruction *Inst, 6637 AtomicOrdering Ord) const { 6638 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 6639 return Builder.CreateFence(AtomicOrdering::Acquire); 6640 return nullptr; 6641 } 6642 6643 TargetLowering::AtomicExpansionKind 6644 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 6645 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 6646 // point operations can't be used in an lr/sc sequence without breaking the 6647 // forward-progress guarantee. 
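// For illustration (hypothetical C++20 source): an update such as
//
//   std::atomic<float> acc;  acc.fetch_add(1.0f);
//
// reaches this hook as an atomicrmw fadd and is expanded to a cmpxchg loop,
// while i8/i16 integer atomicrmw operations are routed to the masked LR/SC
// intrinsics below.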
6648 if (AI->isFloatingPointOperation()) 6649 return AtomicExpansionKind::CmpXChg; 6650 6651 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 6652 if (Size == 8 || Size == 16) 6653 return AtomicExpansionKind::MaskedIntrinsic; 6654 return AtomicExpansionKind::None; 6655 } 6656 6657 static Intrinsic::ID 6658 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 6659 if (XLen == 32) { 6660 switch (BinOp) { 6661 default: 6662 llvm_unreachable("Unexpected AtomicRMW BinOp"); 6663 case AtomicRMWInst::Xchg: 6664 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 6665 case AtomicRMWInst::Add: 6666 return Intrinsic::riscv_masked_atomicrmw_add_i32; 6667 case AtomicRMWInst::Sub: 6668 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 6669 case AtomicRMWInst::Nand: 6670 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 6671 case AtomicRMWInst::Max: 6672 return Intrinsic::riscv_masked_atomicrmw_max_i32; 6673 case AtomicRMWInst::Min: 6674 return Intrinsic::riscv_masked_atomicrmw_min_i32; 6675 case AtomicRMWInst::UMax: 6676 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 6677 case AtomicRMWInst::UMin: 6678 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 6679 } 6680 } 6681 6682 if (XLen == 64) { 6683 switch (BinOp) { 6684 default: 6685 llvm_unreachable("Unexpected AtomicRMW BinOp"); 6686 case AtomicRMWInst::Xchg: 6687 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 6688 case AtomicRMWInst::Add: 6689 return Intrinsic::riscv_masked_atomicrmw_add_i64; 6690 case AtomicRMWInst::Sub: 6691 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 6692 case AtomicRMWInst::Nand: 6693 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 6694 case AtomicRMWInst::Max: 6695 return Intrinsic::riscv_masked_atomicrmw_max_i64; 6696 case AtomicRMWInst::Min: 6697 return Intrinsic::riscv_masked_atomicrmw_min_i64; 6698 case AtomicRMWInst::UMax: 6699 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 6700 case AtomicRMWInst::UMin: 6701 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 6702 } 6703 } 6704 6705 llvm_unreachable("Unexpected XLen\n"); 6706 } 6707 6708 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 6709 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 6710 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 6711 unsigned XLen = Subtarget.getXLen(); 6712 Value *Ordering = 6713 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 6714 Type *Tys[] = {AlignedAddr->getType()}; 6715 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 6716 AI->getModule(), 6717 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 6718 6719 if (XLen == 64) { 6720 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 6721 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 6722 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 6723 } 6724 6725 Value *Result; 6726 6727 // Must pass the shift amount needed to sign extend the loaded value prior 6728 // to performing a signed comparison for min/max. ShiftAmt is the number of 6729 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 6730 // is the number of bits to left+right shift the value in order to 6731 // sign-extend. 
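// Worked example (illustrative): with XLen == 32, an i8 atomicrmw min whose
// byte lives at bit offset 16 has ValWidth == 8 and ShiftAmt == 16, so
// SextShamt == 32 - 8 - 16 == 8, i.e. the loaded field can be sign-extended
// in place by shifting left and then arithmetically right by 8 before the
// signed comparison.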
6732 if (AI->getOperation() == AtomicRMWInst::Min || 6733 AI->getOperation() == AtomicRMWInst::Max) { 6734 const DataLayout &DL = AI->getModule()->getDataLayout(); 6735 unsigned ValWidth = 6736 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 6737 Value *SextShamt = 6738 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 6739 Result = Builder.CreateCall(LrwOpScwLoop, 6740 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 6741 } else { 6742 Result = 6743 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 6744 } 6745 6746 if (XLen == 64) 6747 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 6748 return Result; 6749 } 6750 6751 TargetLowering::AtomicExpansionKind 6752 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 6753 AtomicCmpXchgInst *CI) const { 6754 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 6755 if (Size == 8 || Size == 16) 6756 return AtomicExpansionKind::MaskedIntrinsic; 6757 return AtomicExpansionKind::None; 6758 } 6759 6760 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 6761 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 6762 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 6763 unsigned XLen = Subtarget.getXLen(); 6764 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 6765 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 6766 if (XLen == 64) { 6767 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 6768 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 6769 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 6770 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 6771 } 6772 Type *Tys[] = {AlignedAddr->getType()}; 6773 Function *MaskedCmpXchg = 6774 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 6775 Value *Result = Builder.CreateCall( 6776 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 6777 if (XLen == 64) 6778 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 6779 return Result; 6780 } 6781 6782 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 6783 EVT VT) const { 6784 VT = VT.getScalarType(); 6785 6786 if (!VT.isSimple()) 6787 return false; 6788 6789 switch (VT.getSimpleVT().SimpleTy) { 6790 case MVT::f16: 6791 return Subtarget.hasStdExtZfh(); 6792 case MVT::f32: 6793 return Subtarget.hasStdExtF(); 6794 case MVT::f64: 6795 return Subtarget.hasStdExtD(); 6796 default: 6797 break; 6798 } 6799 6800 return false; 6801 } 6802 6803 Register RISCVTargetLowering::getExceptionPointerRegister( 6804 const Constant *PersonalityFn) const { 6805 return RISCV::X10; 6806 } 6807 6808 Register RISCVTargetLowering::getExceptionSelectorRegister( 6809 const Constant *PersonalityFn) const { 6810 return RISCV::X11; 6811 } 6812 6813 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const { 6814 // Return false to suppress the unnecessary extensions if the LibCall 6815 // arguments or return value is f32 type for LP64 ABI. 
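// For illustration (assuming a soft-float RV64 target using the lp64 ABI): a
// float division lowered to the __divsf3 libcall keeps its f32 arguments and
// result unextended because of the check below; other types keep the default
// behaviour.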
6816 RISCVABI::ABI ABI = Subtarget.getTargetABI();
6817 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
6818 return false;
6819
6820 return true;
6821 }
6822
6823 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
6824 if (Subtarget.is64Bit() && Type == MVT::i32)
6825 return true;
6826
6827 return IsSigned;
6828 }
6829
6830 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
6831 SDValue C) const {
6832 // Check integral scalar types.
6833 if (VT.isScalarInteger()) {
6834 // Omit the optimization if the subtarget has the M extension and the data
6835 // size exceeds XLen.
6836 if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
6837 return false;
6838 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
6839 // Break the MUL into a SLLI and an ADD/SUB.
6840 const APInt &Imm = ConstNode->getAPIntValue();
6841 if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
6842 (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
6843 return true;
6844 // Omit the following optimization if the subtarget has the M extension
6845 // and the data size >= XLen.
6846 if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
6847 return false;
6848 // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
6849 // a pair of LUI/ADDI.
6850 if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
6851 APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
6852 if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
6853 (1 - ImmS).isPowerOf2())
6854 return true;
6855 }
6856 }
6857 }
6858
6859 return false;
6860 }
6861
6862 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
6863 if (!Subtarget.useRVVForFixedLengthVectors())
6864 return false;
6865
6866 if (!VT.isFixedLengthVector())
6867 return false;
6868
6869 // Don't use RVV for vectors we cannot scalarize if required.
6870 switch (VT.getVectorElementType().SimpleTy) {
6871 // i1 is supported but has different rules.
6872 default:
6873 return false;
6874 case MVT::i1:
6875 // Masks can only use a single register.
6876 if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
6877 return false;
6878 break;
6879 case MVT::i8:
6880 case MVT::i16:
6881 case MVT::i32:
6882 case MVT::i64:
6883 break;
6884 case MVT::f16:
6885 if (!Subtarget.hasStdExtZfh())
6886 return false;
6887 break;
6888 case MVT::f32:
6889 if (!Subtarget.hasStdExtF())
6890 return false;
6891 break;
6892 case MVT::f64:
6893 if (!Subtarget.hasStdExtD())
6894 return false;
6895 break;
6896 }
6897
6898 unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
6899 // Don't use RVV for types that don't fit.
6900 if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
6901 return false;
6902
6903 // TODO: Perhaps an artificial restriction, but worth having whilst getting
6904 // the base fixed length RVV support in place.
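// For illustration (assuming fixed-length RVV lowering is enabled with a
// minimum vector length of 128 bits): v4i32 fits at LMUL 1 and is accepted,
// v3i32 is rejected by the power-of-two check below, and a much wider type
// such as v64i32 has already been rejected above once its LMUL exceeds the
// configured maximum.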
6905 if (!VT.isPow2VectorType()) 6906 return false; 6907 6908 return true; 6909 } 6910 6911 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses( 6912 EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, 6913 bool *Fast) const { 6914 if (!VT.isScalableVector()) 6915 return false; 6916 6917 EVT ElemVT = VT.getVectorElementType(); 6918 if (Alignment >= ElemVT.getStoreSize()) { 6919 if (Fast) 6920 *Fast = true; 6921 return true; 6922 } 6923 6924 return false; 6925 } 6926 6927 bool RISCVTargetLowering::splitValueIntoRegisterParts( 6928 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, 6929 unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const { 6930 EVT ValueVT = Val.getValueType(); 6931 if (ValueVT.isScalableVector() && PartVT.isScalableVector()) { 6932 LLVMContext &Context = *DAG.getContext(); 6933 EVT ValueEltVT = ValueVT.getVectorElementType(); 6934 EVT PartEltVT = PartVT.getVectorElementType(); 6935 unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); 6936 unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); 6937 if (PartVTBitSize % ValueVTBitSize == 0) { 6938 // If the element types are different, bitcast to the same element type of 6939 // PartVT first. 6940 if (ValueEltVT != PartEltVT) { 6941 unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits(); 6942 assert(Count != 0 && "The number of element should not be zero."); 6943 EVT SameEltTypeVT = 6944 EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true); 6945 Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val); 6946 } 6947 Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT), 6948 Val, DAG.getConstant(0, DL, Subtarget.getXLenVT())); 6949 Parts[0] = Val; 6950 return true; 6951 } 6952 } 6953 return false; 6954 } 6955 6956 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue( 6957 SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, 6958 MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const { 6959 if (ValueVT.isScalableVector() && PartVT.isScalableVector()) { 6960 LLVMContext &Context = *DAG.getContext(); 6961 SDValue Val = Parts[0]; 6962 EVT ValueEltVT = ValueVT.getVectorElementType(); 6963 EVT PartEltVT = PartVT.getVectorElementType(); 6964 unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); 6965 unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); 6966 if (PartVTBitSize % ValueVTBitSize == 0) { 6967 EVT SameEltTypeVT = ValueVT; 6968 // If the element types are different, convert it to the same element type 6969 // of PartVT. 
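// For illustration (a hypothetical combination, chosen only to trace the
// arithmetic): rejoining a ValueVT of nxv1f32 from a PartVT of nxv2i32 part
// takes this path because the element types differ; Count is 32 / 32 == 1,
// so an nxv1i32 subvector is extracted at index 0 below and then bitcast
// back to nxv1f32.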
6970 if (ValueEltVT != PartEltVT) { 6971 unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits(); 6972 assert(Count != 0 && "The number of element should not be zero."); 6973 SameEltTypeVT = 6974 EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true); 6975 } 6976 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val, 6977 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 6978 if (ValueEltVT != PartEltVT) 6979 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); 6980 return Val; 6981 } 6982 } 6983 return SDValue(); 6984 } 6985 6986 #define GET_REGISTER_MATCHER 6987 #include "RISCVGenAsmMatcher.inc" 6988 6989 Register 6990 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT, 6991 const MachineFunction &MF) const { 6992 Register Reg = MatchRegisterAltName(RegName); 6993 if (Reg == RISCV::NoRegister) 6994 Reg = MatchRegisterName(RegName); 6995 if (Reg == RISCV::NoRegister) 6996 report_fatal_error( 6997 Twine("Invalid register name \"" + StringRef(RegName) + "\".")); 6998 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF); 6999 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg)) 7000 report_fatal_error(Twine("Trying to obtain non-reserved register \"" + 7001 StringRef(RegName) + "\".")); 7002 return Reg; 7003 } 7004 7005 namespace llvm { 7006 namespace RISCVVIntrinsicsTable { 7007 7008 #define GET_RISCVVIntrinsicsTable_IMPL 7009 #include "RISCVGenSearchableTables.inc" 7010 7011 } // namespace RISCVVIntrinsicsTable 7012 7013 } // namespace llvm 7014