1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interfaces that RISCV uses to lower LLVM code into a 10 // selection DAG. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "RISCVISelLowering.h" 15 #include "MCTargetDesc/RISCVMatInt.h" 16 #include "RISCV.h" 17 #include "RISCVMachineFunctionInfo.h" 18 #include "RISCVRegisterInfo.h" 19 #include "RISCVSubtarget.h" 20 #include "RISCVTargetMachine.h" 21 #include "llvm/ADT/SmallSet.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/CodeGen/CallingConvLower.h" 24 #include "llvm/CodeGen/MachineFrameInfo.h" 25 #include "llvm/CodeGen/MachineFunction.h" 26 #include "llvm/CodeGen/MachineInstrBuilder.h" 27 #include "llvm/CodeGen/MachineRegisterInfo.h" 28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 29 #include "llvm/CodeGen/ValueTypes.h" 30 #include "llvm/IR/DiagnosticInfo.h" 31 #include "llvm/IR/DiagnosticPrinter.h" 32 #include "llvm/IR/IntrinsicsRISCV.h" 33 #include "llvm/Support/Debug.h" 34 #include "llvm/Support/ErrorHandling.h" 35 #include "llvm/Support/KnownBits.h" 36 #include "llvm/Support/MathExtras.h" 37 #include "llvm/Support/raw_ostream.h" 38 39 using namespace llvm; 40 41 #define DEBUG_TYPE "riscv-lower" 42 43 STATISTIC(NumTailCalls, "Number of tail calls"); 44 45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, 46 const RISCVSubtarget &STI) 47 : TargetLowering(TM), Subtarget(STI) { 48 49 if (Subtarget.isRV32E()) 50 report_fatal_error("Codegen not yet implemented for RV32E"); 51 52 RISCVABI::ABI ABI = Subtarget.getTargetABI(); 53 assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI"); 54 55 if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) && 56 !Subtarget.hasStdExtF()) { 57 errs() << "Hard-float 'f' ABI can't be used for a target that " 58 "doesn't support the F instruction set extension (ignoring " 59 "target-abi)\n"; 60 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; 61 } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) && 62 !Subtarget.hasStdExtD()) { 63 errs() << "Hard-float 'd' ABI can't be used for a target that " 64 "doesn't support the D instruction set extension (ignoring " 65 "target-abi)\n"; 66 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; 67 } 68 69 switch (ABI) { 70 default: 71 report_fatal_error("Don't know how to lower this ABI"); 72 case RISCVABI::ABI_ILP32: 73 case RISCVABI::ABI_ILP32F: 74 case RISCVABI::ABI_ILP32D: 75 case RISCVABI::ABI_LP64: 76 case RISCVABI::ABI_LP64F: 77 case RISCVABI::ABI_LP64D: 78 break; 79 } 80 81 MVT XLenVT = Subtarget.getXLenVT(); 82 83 // Set up the register classes. 
84 addRegisterClass(XLenVT, &RISCV::GPRRegClass); 85 86 if (Subtarget.hasStdExtZfh()) 87 addRegisterClass(MVT::f16, &RISCV::FPR16RegClass); 88 if (Subtarget.hasStdExtF()) 89 addRegisterClass(MVT::f32, &RISCV::FPR32RegClass); 90 if (Subtarget.hasStdExtD()) 91 addRegisterClass(MVT::f64, &RISCV::FPR64RegClass); 92 93 static const MVT::SimpleValueType BoolVecVTs[] = { 94 MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, 95 MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1}; 96 static const MVT::SimpleValueType IntVecVTs[] = { 97 MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8, 98 MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16, 99 MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32, 100 MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64, 101 MVT::nxv4i64, MVT::nxv8i64}; 102 static const MVT::SimpleValueType F16VecVTs[] = { 103 MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16, 104 MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16}; 105 static const MVT::SimpleValueType F32VecVTs[] = { 106 MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32}; 107 static const MVT::SimpleValueType F64VecVTs[] = { 108 MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; 109 110 if (Subtarget.hasStdExtV()) { 111 auto addRegClassForRVV = [this](MVT VT) { 112 unsigned Size = VT.getSizeInBits().getKnownMinValue(); 113 assert(Size <= 512 && isPowerOf2_32(Size)); 114 const TargetRegisterClass *RC; 115 if (Size <= 64) 116 RC = &RISCV::VRRegClass; 117 else if (Size == 128) 118 RC = &RISCV::VRM2RegClass; 119 else if (Size == 256) 120 RC = &RISCV::VRM4RegClass; 121 else 122 RC = &RISCV::VRM8RegClass; 123 124 addRegisterClass(VT, RC); 125 }; 126 127 for (MVT VT : BoolVecVTs) 128 addRegClassForRVV(VT); 129 for (MVT VT : IntVecVTs) 130 addRegClassForRVV(VT); 131 132 if (Subtarget.hasStdExtZfh()) 133 for (MVT VT : F16VecVTs) 134 addRegClassForRVV(VT); 135 136 if (Subtarget.hasStdExtF()) 137 for (MVT VT : F32VecVTs) 138 addRegClassForRVV(VT); 139 140 if (Subtarget.hasStdExtD()) 141 for (MVT VT : F64VecVTs) 142 addRegClassForRVV(VT); 143 144 if (Subtarget.useRVVForFixedLengthVectors()) { 145 auto addRegClassForFixedVectors = [this](MVT VT) { 146 unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT); 147 const TargetRegisterClass *RC; 148 if (LMul == 1 || VT.getVectorElementType() == MVT::i1) 149 RC = &RISCV::VRRegClass; 150 else if (LMul == 2) 151 RC = &RISCV::VRM2RegClass; 152 else if (LMul == 4) 153 RC = &RISCV::VRM4RegClass; 154 else if (LMul == 8) 155 RC = &RISCV::VRM8RegClass; 156 else 157 llvm_unreachable("Unexpected LMul!"); 158 159 addRegisterClass(VT, RC); 160 }; 161 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) 162 if (useRVVForFixedLengthVectorVT(VT)) 163 addRegClassForFixedVectors(VT); 164 165 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) 166 if (useRVVForFixedLengthVectorVT(VT)) 167 addRegClassForFixedVectors(VT); 168 } 169 } 170 171 // Compute derived properties from the register classes. 172 computeRegisterProperties(STI.getRegisterInfo()); 173 174 setStackPointerRegisterToSaveRestore(RISCV::X2); 175 176 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) 177 setLoadExtAction(N, XLenVT, MVT::i1, Promote); 178 179 // TODO: add all necessary setOperationAction calls. 
180 setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); 181 182 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 183 setOperationAction(ISD::BR_CC, XLenVT, Expand); 184 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 185 setOperationAction(ISD::SELECT_CC, XLenVT, Expand); 186 187 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 188 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 189 190 setOperationAction(ISD::VASTART, MVT::Other, Custom); 191 setOperationAction(ISD::VAARG, MVT::Other, Expand); 192 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 193 setOperationAction(ISD::VAEND, MVT::Other, Expand); 194 195 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 196 if (!Subtarget.hasStdExtZbb()) { 197 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 198 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 199 } 200 201 if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit()) 202 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); 203 204 if (Subtarget.is64Bit()) { 205 setOperationAction(ISD::ADD, MVT::i32, Custom); 206 setOperationAction(ISD::SUB, MVT::i32, Custom); 207 setOperationAction(ISD::SHL, MVT::i32, Custom); 208 setOperationAction(ISD::SRA, MVT::i32, Custom); 209 setOperationAction(ISD::SRL, MVT::i32, Custom); 210 211 setOperationAction(ISD::UADDO, MVT::i32, Custom); 212 setOperationAction(ISD::USUBO, MVT::i32, Custom); 213 setOperationAction(ISD::UADDSAT, MVT::i32, Custom); 214 setOperationAction(ISD::USUBSAT, MVT::i32, Custom); 215 } 216 217 if (!Subtarget.hasStdExtM()) { 218 setOperationAction(ISD::MUL, XLenVT, Expand); 219 setOperationAction(ISD::MULHS, XLenVT, Expand); 220 setOperationAction(ISD::MULHU, XLenVT, Expand); 221 setOperationAction(ISD::SDIV, XLenVT, Expand); 222 setOperationAction(ISD::UDIV, XLenVT, Expand); 223 setOperationAction(ISD::SREM, XLenVT, Expand); 224 setOperationAction(ISD::UREM, XLenVT, Expand); 225 } else { 226 if (Subtarget.is64Bit()) { 227 setOperationAction(ISD::MUL, MVT::i32, Custom); 228 setOperationAction(ISD::MUL, MVT::i128, Custom); 229 230 setOperationAction(ISD::SDIV, MVT::i8, Custom); 231 setOperationAction(ISD::UDIV, MVT::i8, Custom); 232 setOperationAction(ISD::UREM, MVT::i8, Custom); 233 setOperationAction(ISD::SDIV, MVT::i16, Custom); 234 setOperationAction(ISD::UDIV, MVT::i16, Custom); 235 setOperationAction(ISD::UREM, MVT::i16, Custom); 236 setOperationAction(ISD::SDIV, MVT::i32, Custom); 237 setOperationAction(ISD::UDIV, MVT::i32, Custom); 238 setOperationAction(ISD::UREM, MVT::i32, Custom); 239 } else { 240 setOperationAction(ISD::MUL, MVT::i64, Custom); 241 } 242 } 243 244 setOperationAction(ISD::SDIVREM, XLenVT, Expand); 245 setOperationAction(ISD::UDIVREM, XLenVT, Expand); 246 setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); 247 setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); 248 249 setOperationAction(ISD::SHL_PARTS, XLenVT, Custom); 250 setOperationAction(ISD::SRL_PARTS, XLenVT, Custom); 251 setOperationAction(ISD::SRA_PARTS, XLenVT, Custom); 252 253 if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { 254 if (Subtarget.is64Bit()) { 255 setOperationAction(ISD::ROTL, MVT::i32, Custom); 256 setOperationAction(ISD::ROTR, MVT::i32, Custom); 257 } 258 } else { 259 setOperationAction(ISD::ROTL, XLenVT, Expand); 260 setOperationAction(ISD::ROTR, XLenVT, Expand); 261 } 262 263 if (Subtarget.hasStdExtZbp()) { 264 // Custom lower bswap/bitreverse so we can convert them to GREVI to enable 265 // more combining. 
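  // (Illustrative note: under the draft Zbp encoding, BSWAP roughly
  // corresponds to GREVI with shamt XLen-8 and BITREVERSE to GREVI with
  // shamt XLen-1, e.g. grevi rd, rs, 24 and grevi rd, rs, 31 on RV32.)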
266 setOperationAction(ISD::BITREVERSE, XLenVT, Custom); 267 setOperationAction(ISD::BSWAP, XLenVT, Custom); 268 269 if (Subtarget.is64Bit()) { 270 setOperationAction(ISD::BITREVERSE, MVT::i32, Custom); 271 setOperationAction(ISD::BSWAP, MVT::i32, Custom); 272 } 273 } else { 274 // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll 275 // pattern match it directly in isel. 276 setOperationAction(ISD::BSWAP, XLenVT, 277 Subtarget.hasStdExtZbb() ? Legal : Expand); 278 } 279 280 if (Subtarget.hasStdExtZbb()) { 281 setOperationAction(ISD::SMIN, XLenVT, Legal); 282 setOperationAction(ISD::SMAX, XLenVT, Legal); 283 setOperationAction(ISD::UMIN, XLenVT, Legal); 284 setOperationAction(ISD::UMAX, XLenVT, Legal); 285 286 if (Subtarget.is64Bit()) { 287 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 288 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); 289 setOperationAction(ISD::CTLZ, MVT::i32, Custom); 290 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); 291 } 292 } else { 293 setOperationAction(ISD::CTTZ, XLenVT, Expand); 294 setOperationAction(ISD::CTLZ, XLenVT, Expand); 295 setOperationAction(ISD::CTPOP, XLenVT, Expand); 296 } 297 298 if (Subtarget.hasStdExtZbt()) { 299 setOperationAction(ISD::FSHL, XLenVT, Custom); 300 setOperationAction(ISD::FSHR, XLenVT, Custom); 301 setOperationAction(ISD::SELECT, XLenVT, Legal); 302 303 if (Subtarget.is64Bit()) { 304 setOperationAction(ISD::FSHL, MVT::i32, Custom); 305 setOperationAction(ISD::FSHR, MVT::i32, Custom); 306 } 307 } else { 308 setOperationAction(ISD::SELECT, XLenVT, Custom); 309 } 310 311 ISD::CondCode FPCCToExpand[] = { 312 ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, 313 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT, 314 ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO}; 315 316 ISD::NodeType FPOpToExpand[] = { 317 ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP, 318 ISD::FP_TO_FP16}; 319 320 if (Subtarget.hasStdExtZfh()) 321 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 322 323 if (Subtarget.hasStdExtZfh()) { 324 setOperationAction(ISD::FMINNUM, MVT::f16, Legal); 325 setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); 326 for (auto CC : FPCCToExpand) 327 setCondCodeAction(CC, MVT::f16, Expand); 328 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); 329 setOperationAction(ISD::SELECT, MVT::f16, Custom); 330 setOperationAction(ISD::BR_CC, MVT::f16, Expand); 331 for (auto Op : FPOpToExpand) 332 setOperationAction(Op, MVT::f16, Expand); 333 } 334 335 if (Subtarget.hasStdExtF()) { 336 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 337 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 338 for (auto CC : FPCCToExpand) 339 setCondCodeAction(CC, MVT::f32, Expand); 340 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 341 setOperationAction(ISD::SELECT, MVT::f32, Custom); 342 setOperationAction(ISD::BR_CC, MVT::f32, Expand); 343 for (auto Op : FPOpToExpand) 344 setOperationAction(Op, MVT::f32, Expand); 345 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); 346 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 347 } 348 349 if (Subtarget.hasStdExtF() && Subtarget.is64Bit()) 350 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 351 352 if (Subtarget.hasStdExtD()) { 353 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 354 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 355 for (auto CC : FPCCToExpand) 356 setCondCodeAction(CC, MVT::f64, Expand); 357 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 358 
setOperationAction(ISD::SELECT, MVT::f64, Custom); 359 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 360 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); 361 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 362 for (auto Op : FPOpToExpand) 363 setOperationAction(Op, MVT::f64, Expand); 364 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); 365 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 366 } 367 368 if (Subtarget.is64Bit()) { 369 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 370 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 371 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); 372 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); 373 } 374 375 setOperationAction(ISD::GlobalAddress, XLenVT, Custom); 376 setOperationAction(ISD::BlockAddress, XLenVT, Custom); 377 setOperationAction(ISD::ConstantPool, XLenVT, Custom); 378 setOperationAction(ISD::JumpTable, XLenVT, Custom); 379 380 setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom); 381 382 // TODO: On M-mode only targets, the cycle[h] CSR may not be present. 383 // Unfortunately this can't be determined just from the ISA naming string. 384 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, 385 Subtarget.is64Bit() ? Legal : Custom); 386 387 setOperationAction(ISD::TRAP, MVT::Other, Legal); 388 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 389 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 390 391 if (Subtarget.hasStdExtA()) { 392 setMaxAtomicSizeInBitsSupported(Subtarget.getXLen()); 393 setMinCmpXchgSizeInBits(32); 394 } else { 395 setMaxAtomicSizeInBitsSupported(0); 396 } 397 398 setBooleanContents(ZeroOrOneBooleanContent); 399 400 if (Subtarget.hasStdExtV()) { 401 setBooleanVectorContents(ZeroOrOneBooleanContent); 402 403 setOperationAction(ISD::VSCALE, XLenVT, Custom); 404 405 // RVV intrinsics may have illegal operands. 406 // We also need to custom legalize vmv.x.s. 407 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); 408 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); 409 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); 410 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); 411 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); 412 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); 413 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); 414 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); 415 416 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 417 418 if (!Subtarget.is64Bit()) { 419 // We must custom-lower certain vXi64 operations on RV32 due to the vector 420 // element type being illegal. 
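      // For example (roughly speaking), an i64 INSERT_VECTOR_ELT on RV32 has
      // no legal i64 scalar operand, so the element has to be assembled from
      // its two 32-bit halves rather than moved in directly.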
421 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom); 422 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom); 423 424 setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom); 425 setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom); 426 setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom); 427 setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom); 428 setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom); 429 setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom); 430 setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom); 431 setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom); 432 } 433 434 for (MVT VT : BoolVecVTs) { 435 setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); 436 437 // Mask VTs are custom-expanded into a series of standard nodes 438 setOperationAction(ISD::TRUNCATE, VT, Custom); 439 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 440 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 441 442 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 443 444 setOperationAction(ISD::VECREDUCE_AND, VT, Custom); 445 setOperationAction(ISD::VECREDUCE_OR, VT, Custom); 446 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); 447 448 // Expand all extending loads to types larger than this, and truncating 449 // stores from types larger than this. 450 for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { 451 setTruncStoreAction(OtherVT, VT, Expand); 452 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 453 setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); 454 setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); 455 } 456 } 457 458 for (MVT VT : IntVecVTs) { 459 setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); 460 setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); 461 462 setOperationAction(ISD::SMIN, VT, Legal); 463 setOperationAction(ISD::SMAX, VT, Legal); 464 setOperationAction(ISD::UMIN, VT, Legal); 465 setOperationAction(ISD::UMAX, VT, Legal); 466 467 setOperationAction(ISD::ROTL, VT, Expand); 468 setOperationAction(ISD::ROTR, VT, Expand); 469 470 // Custom-lower extensions and truncations from/to mask types. 471 setOperationAction(ISD::ANY_EXTEND, VT, Custom); 472 setOperationAction(ISD::SIGN_EXTEND, VT, Custom); 473 setOperationAction(ISD::ZERO_EXTEND, VT, Custom); 474 475 // RVV has native int->float & float->int conversions where the 476 // element type sizes are within one power-of-two of each other. Any 477 // wider distances between type sizes have to be lowered as sequences 478 // which progressively narrow the gap in stages. 479 setOperationAction(ISD::SINT_TO_FP, VT, Custom); 480 setOperationAction(ISD::UINT_TO_FP, VT, Custom); 481 setOperationAction(ISD::FP_TO_SINT, VT, Custom); 482 setOperationAction(ISD::FP_TO_UINT, VT, Custom); 483 484 // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL" 485 // nodes which truncate by one power of two at a time. 486 setOperationAction(ISD::TRUNCATE, VT, Custom); 487 488 // Custom-lower insert/extract operations to simplify patterns. 489 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 491 492 // Custom-lower reduction operations to set up the corresponding custom 493 // nodes' operands. 
494 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); 495 setOperationAction(ISD::VECREDUCE_AND, VT, Custom); 496 setOperationAction(ISD::VECREDUCE_OR, VT, Custom); 497 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); 498 setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); 499 setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); 500 setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); 501 setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); 502 503 setOperationAction(ISD::MLOAD, VT, Custom); 504 setOperationAction(ISD::MSTORE, VT, Custom); 505 setOperationAction(ISD::MGATHER, VT, Custom); 506 setOperationAction(ISD::MSCATTER, VT, Custom); 507 508 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 509 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 510 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 511 512 setOperationAction(ISD::STEP_VECTOR, VT, Custom); 513 setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); 514 515 for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { 516 setTruncStoreAction(VT, OtherVT, Expand); 517 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 518 setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); 519 setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); 520 } 521 } 522 523 // Expand various CCs to best match the RVV ISA, which natively supports UNE 524 // but no other unordered comparisons, and supports all ordered comparisons 525 // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization 526 // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE), 527 // and we pattern-match those back to the "original", swapping operands once 528 // more. This way we catch both operations and both "vf" and "fv" forms with 529 // fewer patterns. 530 ISD::CondCode VFPCCToExpand[] = { 531 ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, 532 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO, 533 ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE, 534 }; 535 536 // Sets common operation actions on RVV floating-point vector types. 537 const auto SetCommonVFPActions = [&](MVT VT) { 538 setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); 539 // RVV has native FP_ROUND & FP_EXTEND conversions where the element type 540 // sizes are within one power-of-two of each other. Therefore conversions 541 // between vXf16 and vXf64 must be lowered as sequences which convert via 542 // vXf32. 543 setOperationAction(ISD::FP_ROUND, VT, Custom); 544 setOperationAction(ISD::FP_EXTEND, VT, Custom); 545 // Custom-lower insert/extract operations to simplify patterns. 546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 547 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 548 // Expand various condition codes (explained above). 549 for (auto CC : VFPCCToExpand) 550 setCondCodeAction(CC, VT, Expand); 551 552 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); 553 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); 554 setOperationAction(ISD::FCOPYSIGN, VT, Legal); 555 556 setOperationAction(ISD::MLOAD, VT, Custom); 557 setOperationAction(ISD::MSTORE, VT, Custom); 558 setOperationAction(ISD::MGATHER, VT, Custom); 559 setOperationAction(ISD::MSCATTER, VT, Custom); 560 561 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 562 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 563 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 564 565 setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); 566 }; 567 568 // Sets common extload/truncstore actions on RVV floating-point vector 569 // types. 
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasStdExtF())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasStdExtD())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        // Operations below are different between mask vectors and other
        // vectors.
633 if (VT.getVectorElementType() == MVT::i1) { 634 setOperationAction(ISD::AND, VT, Custom); 635 setOperationAction(ISD::OR, VT, Custom); 636 setOperationAction(ISD::XOR, VT, Custom); 637 continue; 638 } 639 640 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 641 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 642 643 setOperationAction(ISD::MLOAD, VT, Custom); 644 setOperationAction(ISD::MSTORE, VT, Custom); 645 setOperationAction(ISD::MGATHER, VT, Custom); 646 setOperationAction(ISD::MSCATTER, VT, Custom); 647 setOperationAction(ISD::ADD, VT, Custom); 648 setOperationAction(ISD::MUL, VT, Custom); 649 setOperationAction(ISD::SUB, VT, Custom); 650 setOperationAction(ISD::AND, VT, Custom); 651 setOperationAction(ISD::OR, VT, Custom); 652 setOperationAction(ISD::XOR, VT, Custom); 653 setOperationAction(ISD::SDIV, VT, Custom); 654 setOperationAction(ISD::SREM, VT, Custom); 655 setOperationAction(ISD::UDIV, VT, Custom); 656 setOperationAction(ISD::UREM, VT, Custom); 657 setOperationAction(ISD::SHL, VT, Custom); 658 setOperationAction(ISD::SRA, VT, Custom); 659 setOperationAction(ISD::SRL, VT, Custom); 660 661 setOperationAction(ISD::SMIN, VT, Custom); 662 setOperationAction(ISD::SMAX, VT, Custom); 663 setOperationAction(ISD::UMIN, VT, Custom); 664 setOperationAction(ISD::UMAX, VT, Custom); 665 setOperationAction(ISD::ABS, VT, Custom); 666 667 setOperationAction(ISD::MULHS, VT, Custom); 668 setOperationAction(ISD::MULHU, VT, Custom); 669 670 setOperationAction(ISD::SINT_TO_FP, VT, Custom); 671 setOperationAction(ISD::UINT_TO_FP, VT, Custom); 672 setOperationAction(ISD::FP_TO_SINT, VT, Custom); 673 setOperationAction(ISD::FP_TO_UINT, VT, Custom); 674 675 setOperationAction(ISD::VSELECT, VT, Custom); 676 677 setOperationAction(ISD::ANY_EXTEND, VT, Custom); 678 setOperationAction(ISD::SIGN_EXTEND, VT, Custom); 679 setOperationAction(ISD::ZERO_EXTEND, VT, Custom); 680 681 // Custom-lower reduction operations to set up the corresponding custom 682 // nodes' operands. 683 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); 684 setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); 685 setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); 686 setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); 687 setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); 688 } 689 690 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) { 691 if (!useRVVForFixedLengthVectorVT(VT)) 692 continue; 693 694 // By default everything must be expanded. 695 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) 696 setOperationAction(Op, VT, Expand); 697 for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) { 698 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 699 setTruncStoreAction(VT, OtherVT, Expand); 700 } 701 702 // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. 
703 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 704 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 705 706 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 707 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 708 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 709 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 710 711 setOperationAction(ISD::LOAD, VT, Custom); 712 setOperationAction(ISD::STORE, VT, Custom); 713 setOperationAction(ISD::MLOAD, VT, Custom); 714 setOperationAction(ISD::MSTORE, VT, Custom); 715 setOperationAction(ISD::MGATHER, VT, Custom); 716 setOperationAction(ISD::MSCATTER, VT, Custom); 717 setOperationAction(ISD::FADD, VT, Custom); 718 setOperationAction(ISD::FSUB, VT, Custom); 719 setOperationAction(ISD::FMUL, VT, Custom); 720 setOperationAction(ISD::FDIV, VT, Custom); 721 setOperationAction(ISD::FNEG, VT, Custom); 722 setOperationAction(ISD::FABS, VT, Custom); 723 setOperationAction(ISD::FCOPYSIGN, VT, Custom); 724 setOperationAction(ISD::FSQRT, VT, Custom); 725 setOperationAction(ISD::FMA, VT, Custom); 726 727 setOperationAction(ISD::FP_ROUND, VT, Custom); 728 setOperationAction(ISD::FP_EXTEND, VT, Custom); 729 730 for (auto CC : VFPCCToExpand) 731 setCondCodeAction(CC, VT, Expand); 732 733 setOperationAction(ISD::VSELECT, VT, Custom); 734 735 setOperationAction(ISD::BITCAST, VT, Custom); 736 737 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); 738 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); 739 } 740 741 // Custom-legalize bitcasts from fixed-length vectors to scalar types. 742 setOperationAction(ISD::BITCAST, MVT::i8, Custom); 743 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 744 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 745 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 746 setOperationAction(ISD::BITCAST, MVT::f16, Custom); 747 setOperationAction(ISD::BITCAST, MVT::f32, Custom); 748 setOperationAction(ISD::BITCAST, MVT::f64, Custom); 749 } 750 } 751 752 // Function alignments. 753 const Align FunctionAlignment(Subtarget.hasStdExtC() ? 
2 : 4); 754 setMinFunctionAlignment(FunctionAlignment); 755 setPrefFunctionAlignment(FunctionAlignment); 756 757 setMinimumJumpTableEntries(5); 758 759 // Jumps are expensive, compared to logic 760 setJumpIsExpensive(); 761 762 // We can use any register for comparisons 763 setHasMultipleConditionRegisters(); 764 765 if (Subtarget.hasStdExtZbp()) { 766 setTargetDAGCombine(ISD::OR); 767 } 768 if (Subtarget.hasStdExtV()) { 769 setTargetDAGCombine(ISD::FCOPYSIGN); 770 setTargetDAGCombine(ISD::MGATHER); 771 setTargetDAGCombine(ISD::MSCATTER); 772 } 773 } 774 775 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, 776 LLVMContext &Context, 777 EVT VT) const { 778 if (!VT.isVector()) 779 return getPointerTy(DL); 780 if (Subtarget.hasStdExtV() && 781 (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors())) 782 return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount()); 783 return VT.changeVectorElementTypeToInteger(); 784 } 785 786 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 787 const CallInst &I, 788 MachineFunction &MF, 789 unsigned Intrinsic) const { 790 switch (Intrinsic) { 791 default: 792 return false; 793 case Intrinsic::riscv_masked_atomicrmw_xchg_i32: 794 case Intrinsic::riscv_masked_atomicrmw_add_i32: 795 case Intrinsic::riscv_masked_atomicrmw_sub_i32: 796 case Intrinsic::riscv_masked_atomicrmw_nand_i32: 797 case Intrinsic::riscv_masked_atomicrmw_max_i32: 798 case Intrinsic::riscv_masked_atomicrmw_min_i32: 799 case Intrinsic::riscv_masked_atomicrmw_umax_i32: 800 case Intrinsic::riscv_masked_atomicrmw_umin_i32: 801 case Intrinsic::riscv_masked_cmpxchg_i32: 802 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 803 Info.opc = ISD::INTRINSIC_W_CHAIN; 804 Info.memVT = MVT::getVT(PtrTy->getElementType()); 805 Info.ptrVal = I.getArgOperand(0); 806 Info.offset = 0; 807 Info.align = Align(4); 808 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | 809 MachineMemOperand::MOVolatile; 810 return true; 811 } 812 } 813 814 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL, 815 const AddrMode &AM, Type *Ty, 816 unsigned AS, 817 Instruction *I) const { 818 // No global is ever allowed as a base. 819 if (AM.BaseGV) 820 return false; 821 822 // Require a 12-bit signed offset. 823 if (!isInt<12>(AM.BaseOffs)) 824 return false; 825 826 switch (AM.Scale) { 827 case 0: // "r+i" or just "i", depending on HasBaseReg. 828 break; 829 case 1: 830 if (!AM.HasBaseReg) // allow "r+i". 831 break; 832 return false; // disallow "r+r" or "r+r+i". 833 default: 834 return false; 835 } 836 837 return true; 838 } 839 840 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 841 return isInt<12>(Imm); 842 } 843 844 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const { 845 return isInt<12>(Imm); 846 } 847 848 // On RV32, 64-bit integers are split into their high and low parts and held 849 // in two different registers, so the trunc is free since the low register can 850 // just be used. 
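// For example, truncating an i64 held in a {hi, lo} register pair down to i32
// simply uses the lo register; no instruction is needed.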
851 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { 852 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) 853 return false; 854 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); 855 unsigned DestBits = DstTy->getPrimitiveSizeInBits(); 856 return (SrcBits == 64 && DestBits == 32); 857 } 858 859 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { 860 if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() || 861 !SrcVT.isInteger() || !DstVT.isInteger()) 862 return false; 863 unsigned SrcBits = SrcVT.getSizeInBits(); 864 unsigned DestBits = DstVT.getSizeInBits(); 865 return (SrcBits == 64 && DestBits == 32); 866 } 867 868 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 869 // Zexts are free if they can be combined with a load. 870 if (auto *LD = dyn_cast<LoadSDNode>(Val)) { 871 EVT MemVT = LD->getMemoryVT(); 872 if ((MemVT == MVT::i8 || MemVT == MVT::i16 || 873 (Subtarget.is64Bit() && MemVT == MVT::i32)) && 874 (LD->getExtensionType() == ISD::NON_EXTLOAD || 875 LD->getExtensionType() == ISD::ZEXTLOAD)) 876 return true; 877 } 878 879 return TargetLowering::isZExtFree(Val, VT2); 880 } 881 882 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const { 883 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64; 884 } 885 886 bool RISCVTargetLowering::isCheapToSpeculateCttz() const { 887 return Subtarget.hasStdExtZbb(); 888 } 889 890 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const { 891 return Subtarget.hasStdExtZbb(); 892 } 893 894 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 895 bool ForCodeSize) const { 896 if (VT == MVT::f16 && !Subtarget.hasStdExtZfh()) 897 return false; 898 if (VT == MVT::f32 && !Subtarget.hasStdExtF()) 899 return false; 900 if (VT == MVT::f64 && !Subtarget.hasStdExtD()) 901 return false; 902 if (Imm.isNegZero()) 903 return false; 904 return Imm.isZero(); 905 } 906 907 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const { 908 return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) || 909 (VT == MVT::f32 && Subtarget.hasStdExtF()) || 910 (VT == MVT::f64 && Subtarget.hasStdExtD()); 911 } 912 913 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, 914 CallingConv::ID CC, 915 EVT VT) const { 916 // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still 917 // end up using a GPR but that will be decided based on ABI. 918 if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) 919 return MVT::f32; 920 921 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); 922 } 923 924 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, 925 CallingConv::ID CC, 926 EVT VT) const { 927 // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still 928 // end up using a GPR but that will be decided based on ABI. 929 if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) 930 return 1; 931 932 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); 933 } 934 935 // Changes the condition code and swaps operands if necessary, so the SetCC 936 // operation matches one of the comparisons supported directly by branches 937 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare 938 // with 1/-1. 939 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, 940 ISD::CondCode &CC, SelectionDAG &DAG) { 941 // Convert X > -1 to X >= 0. 
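  // For instance, (setcc x, -1, setgt) becomes (setcc x, 0, setge), which maps
  // directly onto BGE.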
942 if (CC == ISD::SETGT && isAllOnesConstant(RHS)) { 943 RHS = DAG.getConstant(0, DL, RHS.getValueType()); 944 CC = ISD::SETGE; 945 return; 946 } 947 // Convert X < 1 to 0 >= X. 948 if (CC == ISD::SETLT && isOneConstant(RHS)) { 949 RHS = LHS; 950 LHS = DAG.getConstant(0, DL, RHS.getValueType()); 951 CC = ISD::SETGE; 952 return; 953 } 954 955 switch (CC) { 956 default: 957 break; 958 case ISD::SETGT: 959 case ISD::SETLE: 960 case ISD::SETUGT: 961 case ISD::SETULE: 962 CC = ISD::getSetCCSwappedOperands(CC); 963 std::swap(LHS, RHS); 964 break; 965 } 966 } 967 968 // Return the RISC-V branch opcode that matches the given DAG integer 969 // condition code. The CondCode must be one of those supported by the RISC-V 970 // ISA (see translateSetCCForBranch). 971 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) { 972 switch (CC) { 973 default: 974 llvm_unreachable("Unsupported CondCode"); 975 case ISD::SETEQ: 976 return RISCV::BEQ; 977 case ISD::SETNE: 978 return RISCV::BNE; 979 case ISD::SETLT: 980 return RISCV::BLT; 981 case ISD::SETGE: 982 return RISCV::BGE; 983 case ISD::SETULT: 984 return RISCV::BLTU; 985 case ISD::SETUGE: 986 return RISCV::BGEU; 987 } 988 } 989 990 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) { 991 assert(VT.isScalableVector() && "Expecting a scalable vector type"); 992 unsigned KnownSize = VT.getSizeInBits().getKnownMinValue(); 993 if (VT.getVectorElementType() == MVT::i1) 994 KnownSize *= 8; 995 996 switch (KnownSize) { 997 default: 998 llvm_unreachable("Invalid LMUL."); 999 case 8: 1000 return RISCVVLMUL::LMUL_F8; 1001 case 16: 1002 return RISCVVLMUL::LMUL_F4; 1003 case 32: 1004 return RISCVVLMUL::LMUL_F2; 1005 case 64: 1006 return RISCVVLMUL::LMUL_1; 1007 case 128: 1008 return RISCVVLMUL::LMUL_2; 1009 case 256: 1010 return RISCVVLMUL::LMUL_4; 1011 case 512: 1012 return RISCVVLMUL::LMUL_8; 1013 } 1014 } 1015 1016 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) { 1017 switch (LMul) { 1018 default: 1019 llvm_unreachable("Invalid LMUL."); 1020 case RISCVVLMUL::LMUL_F8: 1021 case RISCVVLMUL::LMUL_F4: 1022 case RISCVVLMUL::LMUL_F2: 1023 case RISCVVLMUL::LMUL_1: 1024 return RISCV::VRRegClassID; 1025 case RISCVVLMUL::LMUL_2: 1026 return RISCV::VRM2RegClassID; 1027 case RISCVVLMUL::LMUL_4: 1028 return RISCV::VRM4RegClassID; 1029 case RISCVVLMUL::LMUL_8: 1030 return RISCV::VRM8RegClassID; 1031 } 1032 } 1033 1034 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { 1035 RISCVVLMUL LMUL = getLMUL(VT); 1036 if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 || 1037 LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) { 1038 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, 1039 "Unexpected subreg numbering"); 1040 return RISCV::sub_vrm1_0 + Index; 1041 } 1042 if (LMUL == RISCVVLMUL::LMUL_2) { 1043 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, 1044 "Unexpected subreg numbering"); 1045 return RISCV::sub_vrm2_0 + Index; 1046 } 1047 if (LMUL == RISCVVLMUL::LMUL_4) { 1048 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, 1049 "Unexpected subreg numbering"); 1050 return RISCV::sub_vrm4_0 + Index; 1051 } 1052 llvm_unreachable("Invalid vector type."); 1053 } 1054 1055 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) { 1056 if (VT.getVectorElementType() == MVT::i1) 1057 return RISCV::VRRegClassID; 1058 return getRegClassIDForLMUL(getLMUL(VT)); 1059 } 1060 1061 // Attempt to decompose a subvector insert/extract between VecVT and 1062 // SubVecVT via subregister indices. 
// Returns the subregister index that can perform the subvector insert/extract
// with the given element index, as well as the index corresponding to any
// leftover subvectors that must be further inserted/extracted within the
// register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

// Return the largest legal scalable vector type that matches VT's element
// type.
MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

  unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
  assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1: {
    // Masks are calculated assuming 8-bit elements since that's when we need
    // the most elements.
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
    return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
  }
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
    return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
  }
  }
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
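// For example (illustrative), a fixed-length v2i64 value V becomes
//   (insert_subvector (undef nxv1i64), V, 0)
// assuming nxv1i64 is the container type chosen for v2i64.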
1140 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, 1141 const RISCVSubtarget &Subtarget) { 1142 assert(VT.isScalableVector() && 1143 "Expected to convert into a scalable vector!"); 1144 assert(V.getValueType().isFixedLengthVector() && 1145 "Expected a fixed length vector operand!"); 1146 SDLoc DL(V); 1147 SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 1148 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero); 1149 } 1150 1151 // Shrink V so it's just big enough to maintain a VT's worth of data. 1152 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, 1153 const RISCVSubtarget &Subtarget) { 1154 assert(VT.isFixedLengthVector() && 1155 "Expected to convert into a fixed length vector!"); 1156 assert(V.getValueType().isScalableVector() && 1157 "Expected a scalable vector operand!"); 1158 SDLoc DL(V); 1159 SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 1160 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero); 1161 } 1162 1163 // Gets the two common "VL" operands: an all-ones mask and the vector length. 1164 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is 1165 // the vector type that it is contained in. 1166 static std::pair<SDValue, SDValue> 1167 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG, 1168 const RISCVSubtarget &Subtarget) { 1169 assert(ContainerVT.isScalableVector() && "Expecting scalable container type"); 1170 MVT XLenVT = Subtarget.getXLenVT(); 1171 SDValue VL = VecVT.isFixedLengthVector() 1172 ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT) 1173 : DAG.getRegister(RISCV::X0, XLenVT); 1174 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 1175 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 1176 return {Mask, VL}; 1177 } 1178 1179 // As above but assuming the given type is a scalable vector type. 1180 static std::pair<SDValue, SDValue> 1181 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG, 1182 const RISCVSubtarget &Subtarget) { 1183 assert(VecVT.isScalableVector() && "Expecting a scalable vector"); 1184 return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget); 1185 } 1186 1187 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few 1188 // of either is (currently) supported. This can get us into an infinite loop 1189 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR 1190 // as a ..., etc. 1191 // Until either (or both) of these can reliably lower any node, reporting that 1192 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks 1193 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack, 1194 // which is not desirable. 1195 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles( 1196 EVT VT, unsigned DefinedValues) const { 1197 return false; 1198 } 1199 1200 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { 1201 // Only splats are currently supported. 
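  // (Illustrative: a splat mask repeats a single source lane for every
  //  element, e.g. <2, 2, 2, 2> selects lane 2 of the first operand
  //  throughout.)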
  if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
    return true;

  return false;
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT =
      RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  MVT XLenVT = Subtarget.getXLenVT();
  unsigned NumElts = Op.getNumOperands();

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    // Lower constant mask BUILD_VECTORs via an integer vector type, in
    // scalar integer chunks whose bit-width depends on the number of mask
    // bits and XLEN.
    // First, determine the most appropriate scalar integer type to use. This
    // is at most XLenVT, but may be shrunk to a smaller vector element type
    // according to the size of the final vector - use i8 chunks rather than
    // XLenVT if we're producing a v8i1. This results in more consistent
    // codegen across RV32 and RV64.
    // If we have to use more than one INSERT_VECTOR_ELT then this optimization
    // is likely to increase code size; avoid performing it in such a case.
    unsigned NumViaIntegerBits =
        std::min(std::max(NumElts, 8u), Subtarget.getXLen());
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
        (!DAG.shouldOptForSize() || NumElts <= NumViaIntegerBits)) {
      // Now we can create our integer vector type. Note that it may be larger
      // than the resulting mask type: v4i1 would use v1i8 as its integer type.
      MVT IntegerViaVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
                           divideCeil(NumElts, NumViaIntegerBits));

      uint64_t Bits = 0;
      unsigned BitPos = 0, IntegerEltIdx = 0;
      SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);

      for (unsigned I = 0; I < NumElts; I++, BitPos++) {
        // Once we accumulate enough bits to fill our scalar type, insert into
        // our vector and clear our accumulated data.
        if (I != 0 && I % NumViaIntegerBits == 0) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64(Bits, 32);
          SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
          Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
                            Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
          Bits = 0;
          BitPos = 0;
          IntegerEltIdx++;
        }
        SDValue V = Op.getOperand(I);
        bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
        Bits |= ((uint64_t)BitValue << BitPos);
      }

      // Insert the (remaining) scalar value into position in our integer
      // vector type.
      if (NumViaIntegerBits <= 32)
        Bits = SignExtend64(Bits, 32);
      SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
                        DAG.getConstant(IntegerEltIdx, DL, XLenVT));

      if (NumElts < NumViaIntegerBits) {
        // If we're producing a smaller vector than our minimum legal integer
        // type, bitcast to the equivalent (known-legal) mask type, and extract
        // our final mask.
        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
        Vec = DAG.getBitcast(MVT::v8i1, Vec);
        Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
                          DAG.getConstant(0, DL, XLenVT));
      } else {
        // Else we must have produced an integer type with the same size as the
        // mask type; bitcast for the final result.
        assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
        Vec = DAG.getBitcast(VT, Vec);
      }

      return Vec;
    }

    return SDValue();
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match an index sequence, which we can lower directly to the vid
  // instruction. An all-undef vector is matched by getSplatValue, above.
  if (VT.isInteger()) {
    bool IsVID = true;
    for (unsigned I = 0; I < NumElts && IsVID; I++)
      IsVID &= Op.getOperand(I).isUndef() ||
               (isa<ConstantSDNode>(Op.getOperand(I)) &&
                Op.getConstantOperandVal(I) == I);

    if (IsVID) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      return convertFromScalableVector(VT, VID, DAG, Subtarget);
    }
  }

  // Attempt to detect "hidden" splats, which only reveal themselves as splats
  // when re-interpreted as a vector with a larger element type. For example,
  //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splatted as
  //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
  // TODO: This optimization could also work on non-constant splats, but it
  // would require bit-manipulation instructions to construct the splat value.
  SmallVector<SDValue> Sequence;
  unsigned EltBitSize = VT.getScalarSizeInBits();
  const auto *BV = cast<BuildVectorSDNode>(Op);
  if (VT.isInteger() && EltBitSize < 64 &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
      BV->getRepeatedSequence(Sequence) &&
      (Sequence.size() * EltBitSize) <= 64) {
    unsigned SeqLen = Sequence.size();
    MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
    MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
            ViaIntVT == MVT::i64) &&
           "Unexpected sequence type");

    unsigned EltIdx = 0;
    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
    uint64_t SplatValue = 0;
    // Construct the amalgamated value which can be splatted as this larger
    // vector type.
    for (const auto &SeqV : Sequence) {
      if (!SeqV.isUndef())
        SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
                       << (EltIdx * EltBitSize));
      EltIdx++;
    }

    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
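    // For instance, the 32-bit chunk 0x80000000 is cheaper to materialize as
    // the sign-extended value 0xFFFFFFFF80000000 (a single LUI) than as a
    // zero-extended 64-bit constant.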
1362 if (Subtarget.is64Bit() && ViaIntVT == MVT::i32) 1363 SplatValue = SignExtend64(SplatValue, 32); 1364 1365 // Since we can't introduce illegal i64 types at this stage, we can only 1366 // perform an i64 splat on RV32 if it is its own sign-extended value. That 1367 // way we can use RVV instructions to splat. 1368 assert((ViaIntVT.bitsLE(XLenVT) || 1369 (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) && 1370 "Unexpected bitcast sequence"); 1371 if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) { 1372 SDValue ViaVL = 1373 DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT); 1374 MVT ViaContainerVT = 1375 RISCVTargetLowering::getContainerForFixedLengthVector(DAG, ViaVecVT, 1376 Subtarget); 1377 SDValue Splat = 1378 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT, 1379 DAG.getConstant(SplatValue, DL, XLenVT), ViaVL); 1380 Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget); 1381 return DAG.getBitcast(VT, Splat); 1382 } 1383 } 1384 1385 // Try and optimize BUILD_VECTORs with "dominant values" - these are values 1386 // which constitute a large proportion of the elements. In such cases we can 1387 // splat a vector with the dominant element and make up the shortfall with 1388 // INSERT_VECTOR_ELTs. 1389 // Note that this includes vectors of 2 elements by association. The 1390 // upper-most element is the "dominant" one, allowing us to use a splat to 1391 // "insert" the upper element, and an insert of the lower element at position 1392 // 0, which improves codegen. 1393 SDValue DominantValue; 1394 unsigned MostCommonCount = 0; 1395 DenseMap<SDValue, unsigned> ValueCounts; 1396 unsigned NumUndefElts = 1397 count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); }); 1398 1399 for (SDValue V : Op->op_values()) { 1400 if (V.isUndef()) 1401 continue; 1402 1403 ValueCounts.insert(std::make_pair(V, 0)); 1404 unsigned &Count = ValueCounts[V]; 1405 1406 // Is this value dominant? In case of a tie, prefer the highest element as 1407 // it's cheaper to insert near the beginning of a vector than it is at the 1408 // end. 1409 if (++Count >= MostCommonCount) { 1410 DominantValue = V; 1411 MostCommonCount = Count; 1412 } 1413 } 1414 1415 assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR"); 1416 unsigned NumDefElts = NumElts - NumUndefElts; 1417 unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2; 1418 1419 // Don't perform this optimization when optimizing for size, since 1420 // materializing elements and inserting them tends to cause code bloat. 1421 if (!DAG.shouldOptForSize() && 1422 ((MostCommonCount > DominantValueCountThreshold) || 1423 (ValueCounts.size() <= Log2_32(NumDefElts)))) { 1424 // Start by splatting the most common element. 1425 SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue); 1426 1427 DenseSet<SDValue> Processed{DominantValue}; 1428 MVT SelMaskTy = VT.changeVectorElementType(MVT::i1); 1429 for (const auto &OpIdx : enumerate(Op->ops())) { 1430 const SDValue &V = OpIdx.value(); 1431 if (V.isUndef() || !Processed.insert(V).second) 1432 continue; 1433 if (ValueCounts[V] == 1) { 1434 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, 1435 DAG.getConstant(OpIdx.index(), DL, XLenVT)); 1436 } else { 1437 // Blend in all instances of this value using a VSELECT, using a 1438 // mask where each bit signals whether that element is the one 1439 // we're after. 
1440 SmallVector<SDValue> Ops; 1441 transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) { 1442 return DAG.getConstant(V == V1, DL, XLenVT); 1443 }); 1444 Vec = DAG.getNode(ISD::VSELECT, DL, VT, 1445 DAG.getBuildVector(SelMaskTy, DL, Ops), 1446 DAG.getSplatBuildVector(VT, DL, V), Vec); 1447 } 1448 } 1449 1450 return Vec; 1451 } 1452 1453 return SDValue(); 1454 } 1455 1456 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, 1457 const RISCVSubtarget &Subtarget) { 1458 SDValue V1 = Op.getOperand(0); 1459 SDValue V2 = Op.getOperand(1); 1460 SDLoc DL(Op); 1461 MVT XLenVT = Subtarget.getXLenVT(); 1462 MVT VT = Op.getSimpleValueType(); 1463 unsigned NumElts = VT.getVectorNumElements(); 1464 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 1465 1466 if (SVN->isSplat()) { 1467 int Lane = SVN->getSplatIndex(); 1468 if (Lane >= 0) { 1469 MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector( 1470 DAG, VT, Subtarget); 1471 1472 V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); 1473 assert(Lane < (int)NumElts && "Unexpected lane!"); 1474 1475 SDValue Mask, VL; 1476 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1477 SDValue Gather = 1478 DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1, 1479 DAG.getConstant(Lane, DL, XLenVT), Mask, VL); 1480 return convertFromScalableVector(VT, Gather, DAG, Subtarget); 1481 } 1482 } 1483 1484 // Detect shuffles which can be re-expressed as vector selects. 1485 SmallVector<SDValue> MaskVals; 1486 // By default we preserve the original operand order, and select LHS as true 1487 // and RHS as false. However, since RVV vector selects may feature splats but 1488 // only on the LHS, we may choose to invert our mask and instead select 1489 // between RHS and LHS. 1490 bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1); 1491 1492 bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) { 1493 int MaskIndex = MaskIdx.value(); 1494 bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ SwapOps; 1495 MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT)); 1496 return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts; 1497 }); 1498 1499 if (IsSelect) { 1500 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle"); 1501 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts); 1502 SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals); 1503 return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, SwapOps ? V2 : V1, 1504 SwapOps ? V1 : V2); 1505 } 1506 1507 return SDValue(); 1508 } 1509 1510 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT, 1511 SDLoc DL, SelectionDAG &DAG, 1512 const RISCVSubtarget &Subtarget) { 1513 if (VT.isScalableVector()) 1514 return DAG.getFPExtendOrRound(Op, DL, VT); 1515 assert(VT.isFixedLengthVector() && 1516 "Unexpected value type for RVV FP extend/round lowering"); 1517 SDValue Mask, VL; 1518 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1519 unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType()) 1520 ? 
RISCVISD::FP_EXTEND_VL
1521 : RISCVISD::FP_ROUND_VL;
1522 return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1523 }
1524
1525 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1526 SelectionDAG &DAG) const {
1527 switch (Op.getOpcode()) {
1528 default:
1529 report_fatal_error("unimplemented operand");
1530 case ISD::GlobalAddress:
1531 return lowerGlobalAddress(Op, DAG);
1532 case ISD::BlockAddress:
1533 return lowerBlockAddress(Op, DAG);
1534 case ISD::ConstantPool:
1535 return lowerConstantPool(Op, DAG);
1536 case ISD::JumpTable:
1537 return lowerJumpTable(Op, DAG);
1538 case ISD::GlobalTLSAddress:
1539 return lowerGlobalTLSAddress(Op, DAG);
1540 case ISD::SELECT:
1541 return lowerSELECT(Op, DAG);
1542 case ISD::BRCOND:
1543 return lowerBRCOND(Op, DAG);
1544 case ISD::VASTART:
1545 return lowerVASTART(Op, DAG);
1546 case ISD::FRAMEADDR:
1547 return lowerFRAMEADDR(Op, DAG);
1548 case ISD::RETURNADDR:
1549 return lowerRETURNADDR(Op, DAG);
1550 case ISD::SHL_PARTS:
1551 return lowerShiftLeftParts(Op, DAG);
1552 case ISD::SRA_PARTS:
1553 return lowerShiftRightParts(Op, DAG, true);
1554 case ISD::SRL_PARTS:
1555 return lowerShiftRightParts(Op, DAG, false);
1556 case ISD::BITCAST: {
1557 SDLoc DL(Op);
1558 EVT VT = Op.getValueType();
1559 SDValue Op0 = Op.getOperand(0);
1560 EVT Op0VT = Op0.getValueType();
1561 MVT XLenVT = Subtarget.getXLenVT();
1562 if (VT.isFixedLengthVector()) {
1563 // We can handle fixed length vector bitcasts with a simple replacement
1564 // in isel.
1565 if (Op0VT.isFixedLengthVector())
1566 return Op;
1567 // When bitcasting from scalar to fixed-length vector, insert the scalar
1568 // into a one-element vector of the result type, and perform a vector
1569 // bitcast.
1570 if (!Op0VT.isVector()) {
1571 auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1572 return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1573 DAG.getUNDEF(BVT), Op0,
1574 DAG.getConstant(0, DL, XLenVT)));
1575 }
1576 return SDValue();
1577 }
1578 // Custom-legalize bitcasts from fixed-length vector types to scalar types
1579 // thus: bitcast the vector to a one-element vector type whose element type
1580 // is the same as the result type, and extract the first element.
1581 if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1582 LLVMContext &Context = *DAG.getContext();
1583 SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1584 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1585 DAG.getConstant(0, DL, XLenVT));
1586 }
1587 if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1588 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1589 SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1590 return FPConv;
1591 }
1592 if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1593 Subtarget.hasStdExtF()) {
1594 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1595 SDValue FPConv =
1596 DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1597 return FPConv;
1598 }
1599 return SDValue();
1600 }
1601 case ISD::INTRINSIC_WO_CHAIN:
1602 return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1603 case ISD::INTRINSIC_W_CHAIN:
1604 return LowerINTRINSIC_W_CHAIN(Op, DAG);
1605 case ISD::BSWAP:
1606 case ISD::BITREVERSE: {
1607 // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
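// GREVI is the Zbp generalized bit-reverse with an immediate control value:
// BITREVERSE uses a control of bitwidth-1 (all control bits set), while BSWAP
// clears the low three control bits so the bit order within each byte is
// preserved.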
1608 assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 1609 MVT VT = Op.getSimpleValueType(); 1610 SDLoc DL(Op); 1611 // Start with the maximum immediate value which is the bitwidth - 1. 1612 unsigned Imm = VT.getSizeInBits() - 1; 1613 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. 1614 if (Op.getOpcode() == ISD::BSWAP) 1615 Imm &= ~0x7U; 1616 return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0), 1617 DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT())); 1618 } 1619 case ISD::FSHL: 1620 case ISD::FSHR: { 1621 MVT VT = Op.getSimpleValueType(); 1622 assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization"); 1623 SDLoc DL(Op); 1624 if (Op.getOperand(2).getOpcode() == ISD::Constant) 1625 return Op; 1626 // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only 1627 // use log(XLen) bits. Mask the shift amount accordingly. 1628 unsigned ShAmtWidth = Subtarget.getXLen() - 1; 1629 SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2), 1630 DAG.getConstant(ShAmtWidth, DL, VT)); 1631 unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR; 1632 return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt); 1633 } 1634 case ISD::TRUNCATE: { 1635 SDLoc DL(Op); 1636 MVT VT = Op.getSimpleValueType(); 1637 // Only custom-lower vector truncates 1638 if (!VT.isVector()) 1639 return Op; 1640 1641 // Truncates to mask types are handled differently 1642 if (VT.getVectorElementType() == MVT::i1) 1643 return lowerVectorMaskTrunc(Op, DAG); 1644 1645 // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary 1646 // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which 1647 // truncate by one power of two at a time. 1648 MVT DstEltVT = VT.getVectorElementType(); 1649 1650 SDValue Src = Op.getOperand(0); 1651 MVT SrcVT = Src.getSimpleValueType(); 1652 MVT SrcEltVT = SrcVT.getVectorElementType(); 1653 1654 assert(DstEltVT.bitsLT(SrcEltVT) && 1655 isPowerOf2_64(DstEltVT.getSizeInBits()) && 1656 isPowerOf2_64(SrcEltVT.getSizeInBits()) && 1657 "Unexpected vector truncate lowering"); 1658 1659 MVT ContainerVT = SrcVT; 1660 if (SrcVT.isFixedLengthVector()) { 1661 ContainerVT = getContainerForFixedLengthVector(SrcVT); 1662 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 1663 } 1664 1665 SDValue Result = Src; 1666 SDValue Mask, VL; 1667 std::tie(Mask, VL) = 1668 getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); 1669 LLVMContext &Context = *DAG.getContext(); 1670 const ElementCount Count = ContainerVT.getVectorElementCount(); 1671 do { 1672 SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2); 1673 EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count); 1674 Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result, 1675 Mask, VL); 1676 } while (SrcEltVT != DstEltVT); 1677 1678 if (SrcVT.isFixedLengthVector()) 1679 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 1680 1681 return Result; 1682 } 1683 case ISD::ANY_EXTEND: 1684 case ISD::ZERO_EXTEND: 1685 if (Op.getOperand(0).getValueType().isVector() && 1686 Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 1687 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1); 1688 return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL); 1689 case ISD::SIGN_EXTEND: 1690 if (Op.getOperand(0).getValueType().isVector() && 1691 Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 1692 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1); 
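// Non-mask sign-extensions of fixed-length vectors are lowered below to the
// VL-predicated VSEXT node on a scalable container type; scalable sources are
// left for the standard patterns.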
1693 return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL); 1694 case ISD::SPLAT_VECTOR_PARTS: 1695 return lowerSPLAT_VECTOR_PARTS(Op, DAG); 1696 case ISD::INSERT_VECTOR_ELT: 1697 return lowerINSERT_VECTOR_ELT(Op, DAG); 1698 case ISD::EXTRACT_VECTOR_ELT: 1699 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 1700 case ISD::VSCALE: { 1701 MVT VT = Op.getSimpleValueType(); 1702 SDLoc DL(Op); 1703 SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT); 1704 // We define our scalable vector types for lmul=1 to use a 64 bit known 1705 // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate 1706 // vscale as VLENB / 8. 1707 assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!"); 1708 SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB, 1709 DAG.getConstant(3, DL, VT)); 1710 return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0)); 1711 } 1712 case ISD::FP_EXTEND: { 1713 // RVV can only do fp_extend to types double the size as the source. We 1714 // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going 1715 // via f32. 1716 SDLoc DL(Op); 1717 MVT VT = Op.getSimpleValueType(); 1718 SDValue Src = Op.getOperand(0); 1719 MVT SrcVT = Src.getSimpleValueType(); 1720 1721 // Prepare any fixed-length vector operands. 1722 MVT ContainerVT = VT; 1723 if (SrcVT.isFixedLengthVector()) { 1724 ContainerVT = getContainerForFixedLengthVector(VT); 1725 MVT SrcContainerVT = 1726 ContainerVT.changeVectorElementType(SrcVT.getVectorElementType()); 1727 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 1728 } 1729 1730 if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 || 1731 SrcVT.getVectorElementType() != MVT::f16) { 1732 // For scalable vectors, we only need to close the gap between 1733 // vXf16->vXf64. 1734 if (!VT.isFixedLengthVector()) 1735 return Op; 1736 // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version. 1737 Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); 1738 return convertFromScalableVector(VT, Src, DAG, Subtarget); 1739 } 1740 1741 MVT InterVT = VT.changeVectorElementType(MVT::f32); 1742 MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32); 1743 SDValue IntermediateExtend = getRVVFPExtendOrRound( 1744 Src, InterVT, InterContainerVT, DL, DAG, Subtarget); 1745 1746 SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT, 1747 DL, DAG, Subtarget); 1748 if (VT.isFixedLengthVector()) 1749 return convertFromScalableVector(VT, Extend, DAG, Subtarget); 1750 return Extend; 1751 } 1752 case ISD::FP_ROUND: { 1753 // RVV can only do fp_round to types half the size as the source. We 1754 // custom-lower f64->f16 rounds via RVV's round-to-odd float 1755 // conversion instruction. 1756 SDLoc DL(Op); 1757 MVT VT = Op.getSimpleValueType(); 1758 SDValue Src = Op.getOperand(0); 1759 MVT SrcVT = Src.getSimpleValueType(); 1760 1761 // Prepare any fixed-length vector operands. 1762 MVT ContainerVT = VT; 1763 if (VT.isFixedLengthVector()) { 1764 MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 1765 ContainerVT = 1766 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 1767 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 1768 } 1769 1770 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 || 1771 SrcVT.getVectorElementType() != MVT::f64) { 1772 // For scalable vectors, we only need to close the gap between 1773 // vXf64<->vXf16. 
1774 if (!VT.isFixedLengthVector()) 1775 return Op; 1776 // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version. 1777 Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); 1778 return convertFromScalableVector(VT, Src, DAG, Subtarget); 1779 } 1780 1781 SDValue Mask, VL; 1782 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1783 1784 MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32); 1785 SDValue IntermediateRound = 1786 DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL); 1787 SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT, 1788 DL, DAG, Subtarget); 1789 1790 if (VT.isFixedLengthVector()) 1791 return convertFromScalableVector(VT, Round, DAG, Subtarget); 1792 return Round; 1793 } 1794 case ISD::FP_TO_SINT: 1795 case ISD::FP_TO_UINT: 1796 case ISD::SINT_TO_FP: 1797 case ISD::UINT_TO_FP: { 1798 // RVV can only do fp<->int conversions to types half/double the size as 1799 // the source. We custom-lower any conversions that do two hops into 1800 // sequences. 1801 MVT VT = Op.getSimpleValueType(); 1802 if (!VT.isVector()) 1803 return Op; 1804 SDLoc DL(Op); 1805 SDValue Src = Op.getOperand(0); 1806 MVT EltVT = VT.getVectorElementType(); 1807 MVT SrcVT = Src.getSimpleValueType(); 1808 MVT SrcEltVT = SrcVT.getVectorElementType(); 1809 unsigned EltSize = EltVT.getSizeInBits(); 1810 unsigned SrcEltSize = SrcEltVT.getSizeInBits(); 1811 assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) && 1812 "Unexpected vector element types"); 1813 1814 bool IsInt2FP = SrcEltVT.isInteger(); 1815 // Widening conversions 1816 if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) { 1817 if (IsInt2FP) { 1818 // Do a regular integer sign/zero extension then convert to float. 1819 MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()), 1820 VT.getVectorElementCount()); 1821 unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP 1822 ? ISD::ZERO_EXTEND 1823 : ISD::SIGN_EXTEND; 1824 SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src); 1825 return DAG.getNode(Op.getOpcode(), DL, VT, Ext); 1826 } 1827 // FP2Int 1828 assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering"); 1829 // Do one doubling fp_extend then complete the operation by converting 1830 // to int. 1831 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); 1832 SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT); 1833 return DAG.getNode(Op.getOpcode(), DL, VT, FExt); 1834 } 1835 1836 // Narrowing conversions 1837 if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) { 1838 if (IsInt2FP) { 1839 // One narrowing int_to_fp, then an fp_round. 1840 assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering"); 1841 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); 1842 SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src); 1843 return DAG.getFPExtendOrRound(Int2FP, DL, VT); 1844 } 1845 // FP2Int 1846 // One narrowing fp_to_int, then truncate the integer. If the float isn't 1847 // representable by the integer, the result is poison. 1848 MVT IVecVT = 1849 MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2), 1850 VT.getVectorElementCount()); 1851 SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src); 1852 return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int); 1853 } 1854 1855 // Scalable vectors can exit here. Patterns will handle equally-sized 1856 // conversions halving/doubling ones. 
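// Fixed-length vectors are instead lowered to the VL-predicated conversion
// nodes below, operating on a scalable container type.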
1857 if (!VT.isFixedLengthVector()) 1858 return Op; 1859 1860 // For fixed-length vectors we lower to a custom "VL" node. 1861 unsigned RVVOpc = 0; 1862 switch (Op.getOpcode()) { 1863 default: 1864 llvm_unreachable("Impossible opcode"); 1865 case ISD::FP_TO_SINT: 1866 RVVOpc = RISCVISD::FP_TO_SINT_VL; 1867 break; 1868 case ISD::FP_TO_UINT: 1869 RVVOpc = RISCVISD::FP_TO_UINT_VL; 1870 break; 1871 case ISD::SINT_TO_FP: 1872 RVVOpc = RISCVISD::SINT_TO_FP_VL; 1873 break; 1874 case ISD::UINT_TO_FP: 1875 RVVOpc = RISCVISD::UINT_TO_FP_VL; 1876 break; 1877 } 1878 1879 MVT ContainerVT, SrcContainerVT; 1880 // Derive the reference container type from the larger vector type. 1881 if (SrcEltSize > EltSize) { 1882 SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 1883 ContainerVT = 1884 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 1885 } else { 1886 ContainerVT = getContainerForFixedLengthVector(VT); 1887 SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); 1888 } 1889 1890 SDValue Mask, VL; 1891 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1892 1893 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 1894 Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); 1895 return convertFromScalableVector(VT, Src, DAG, Subtarget); 1896 } 1897 case ISD::VECREDUCE_ADD: 1898 case ISD::VECREDUCE_UMAX: 1899 case ISD::VECREDUCE_SMAX: 1900 case ISD::VECREDUCE_UMIN: 1901 case ISD::VECREDUCE_SMIN: 1902 return lowerVECREDUCE(Op, DAG); 1903 case ISD::VECREDUCE_AND: 1904 case ISD::VECREDUCE_OR: 1905 case ISD::VECREDUCE_XOR: 1906 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 1907 return lowerVectorMaskVECREDUCE(Op, DAG); 1908 return lowerVECREDUCE(Op, DAG); 1909 case ISD::VECREDUCE_FADD: 1910 case ISD::VECREDUCE_SEQ_FADD: 1911 return lowerFPVECREDUCE(Op, DAG); 1912 case ISD::INSERT_SUBVECTOR: 1913 return lowerINSERT_SUBVECTOR(Op, DAG); 1914 case ISD::EXTRACT_SUBVECTOR: 1915 return lowerEXTRACT_SUBVECTOR(Op, DAG); 1916 case ISD::STEP_VECTOR: 1917 return lowerSTEP_VECTOR(Op, DAG); 1918 case ISD::VECTOR_REVERSE: 1919 return lowerVECTOR_REVERSE(Op, DAG); 1920 case ISD::BUILD_VECTOR: 1921 return lowerBUILD_VECTOR(Op, DAG, Subtarget); 1922 case ISD::VECTOR_SHUFFLE: 1923 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 1924 case ISD::CONCAT_VECTORS: { 1925 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is 1926 // better than going through the stack, as the default expansion does. 
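// i.e. (concat_vectors a, b) becomes
// (insert_subvector (insert_subvector undef, a, 0), b, NumOpElts),
// placing each operand at a multiple of its own element count.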
1927 SDLoc DL(Op); 1928 MVT VT = Op.getSimpleValueType(); 1929 unsigned NumOpElts = 1930 Op.getOperand(0).getSimpleValueType().getVectorMinNumElements(); 1931 SDValue Vec = DAG.getUNDEF(VT); 1932 for (const auto &OpIdx : enumerate(Op->ops())) 1933 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), 1934 DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); 1935 return Vec; 1936 } 1937 case ISD::LOAD: 1938 return lowerFixedLengthVectorLoadToRVV(Op, DAG); 1939 case ISD::STORE: 1940 return lowerFixedLengthVectorStoreToRVV(Op, DAG); 1941 case ISD::MLOAD: 1942 return lowerMLOAD(Op, DAG); 1943 case ISD::MSTORE: 1944 return lowerMSTORE(Op, DAG); 1945 case ISD::SETCC: 1946 return lowerFixedLengthVectorSetccToRVV(Op, DAG); 1947 case ISD::ADD: 1948 return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); 1949 case ISD::SUB: 1950 return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); 1951 case ISD::MUL: 1952 return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); 1953 case ISD::MULHS: 1954 return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); 1955 case ISD::MULHU: 1956 return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); 1957 case ISD::AND: 1958 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, 1959 RISCVISD::AND_VL); 1960 case ISD::OR: 1961 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, 1962 RISCVISD::OR_VL); 1963 case ISD::XOR: 1964 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, 1965 RISCVISD::XOR_VL); 1966 case ISD::SDIV: 1967 return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); 1968 case ISD::SREM: 1969 return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); 1970 case ISD::UDIV: 1971 return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); 1972 case ISD::UREM: 1973 return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); 1974 case ISD::SHL: 1975 return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL); 1976 case ISD::SRA: 1977 return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL); 1978 case ISD::SRL: 1979 return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL); 1980 case ISD::FADD: 1981 return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); 1982 case ISD::FSUB: 1983 return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); 1984 case ISD::FMUL: 1985 return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); 1986 case ISD::FDIV: 1987 return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); 1988 case ISD::FNEG: 1989 return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); 1990 case ISD::FABS: 1991 return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); 1992 case ISD::FSQRT: 1993 return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); 1994 case ISD::FMA: 1995 return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); 1996 case ISD::SMIN: 1997 return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); 1998 case ISD::SMAX: 1999 return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); 2000 case ISD::UMIN: 2001 return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); 2002 case ISD::UMAX: 2003 return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); 2004 case ISD::ABS: 2005 return lowerABS(Op, DAG); 2006 case ISD::VSELECT: 2007 return lowerFixedLengthVectorSelectToRVV(Op, DAG); 2008 case ISD::FCOPYSIGN: 2009 return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG); 2010 case ISD::MGATHER: 2011 return lowerMGATHER(Op, DAG); 2012 case ISD::MSCATTER: 2013 return lowerMSCATTER(Op, DAG); 2014 } 2015 } 2016 2017 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, 2018 SelectionDAG &DAG, unsigned Flags) { 2019 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, 
Flags); 2020 } 2021 2022 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, 2023 SelectionDAG &DAG, unsigned Flags) { 2024 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), 2025 Flags); 2026 } 2027 2028 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, 2029 SelectionDAG &DAG, unsigned Flags) { 2030 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), 2031 N->getOffset(), Flags); 2032 } 2033 2034 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, 2035 SelectionDAG &DAG, unsigned Flags) { 2036 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); 2037 } 2038 2039 template <class NodeTy> 2040 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, 2041 bool IsLocal) const { 2042 SDLoc DL(N); 2043 EVT Ty = getPointerTy(DAG.getDataLayout()); 2044 2045 if (isPositionIndependent()) { 2046 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 2047 if (IsLocal) 2048 // Use PC-relative addressing to access the symbol. This generates the 2049 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) 2050 // %pcrel_lo(auipc)). 2051 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 2052 2053 // Use PC-relative addressing to access the GOT for this symbol, then load 2054 // the address from the GOT. This generates the pattern (PseudoLA sym), 2055 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). 2056 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); 2057 } 2058 2059 switch (getTargetMachine().getCodeModel()) { 2060 default: 2061 report_fatal_error("Unsupported code model for lowering"); 2062 case CodeModel::Small: { 2063 // Generate a sequence for accessing addresses within the first 2 GiB of 2064 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). 2065 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); 2066 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); 2067 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 2068 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); 2069 } 2070 case CodeModel::Medium: { 2071 // Generate a sequence for accessing addresses within any 2GiB range within 2072 // the address space. This generates the pattern (PseudoLLA sym), which 2073 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). 2074 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 2075 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 2076 } 2077 } 2078 } 2079 2080 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, 2081 SelectionDAG &DAG) const { 2082 SDLoc DL(Op); 2083 EVT Ty = Op.getValueType(); 2084 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 2085 int64_t Offset = N->getOffset(); 2086 MVT XLenVT = Subtarget.getXLenVT(); 2087 2088 const GlobalValue *GV = N->getGlobal(); 2089 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 2090 SDValue Addr = getAddr(N, DAG, IsLocal); 2091 2092 // In order to maximise the opportunity for common subexpression elimination, 2093 // emit a separate ADD node for the global address offset instead of folding 2094 // it in the global address node. Later peephole optimisations may choose to 2095 // fold it back in when profitable. 
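// i.e. the address of (sym + offset) is computed as
// (add (address of sym), offset) rather than folding the offset into the
// relocations.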
2096 if (Offset != 0) 2097 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 2098 DAG.getConstant(Offset, DL, XLenVT)); 2099 return Addr; 2100 } 2101 2102 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, 2103 SelectionDAG &DAG) const { 2104 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); 2105 2106 return getAddr(N, DAG); 2107 } 2108 2109 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, 2110 SelectionDAG &DAG) const { 2111 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); 2112 2113 return getAddr(N, DAG); 2114 } 2115 2116 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, 2117 SelectionDAG &DAG) const { 2118 JumpTableSDNode *N = cast<JumpTableSDNode>(Op); 2119 2120 return getAddr(N, DAG); 2121 } 2122 2123 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, 2124 SelectionDAG &DAG, 2125 bool UseGOT) const { 2126 SDLoc DL(N); 2127 EVT Ty = getPointerTy(DAG.getDataLayout()); 2128 const GlobalValue *GV = N->getGlobal(); 2129 MVT XLenVT = Subtarget.getXLenVT(); 2130 2131 if (UseGOT) { 2132 // Use PC-relative addressing to access the GOT for this TLS symbol, then 2133 // load the address from the GOT and add the thread pointer. This generates 2134 // the pattern (PseudoLA_TLS_IE sym), which expands to 2135 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). 2136 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 2137 SDValue Load = 2138 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); 2139 2140 // Add the thread pointer. 2141 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 2142 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); 2143 } 2144 2145 // Generate a sequence for accessing the address relative to the thread 2146 // pointer, with the appropriate adjustment for the thread pointer offset. 2147 // This generates the pattern 2148 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) 2149 SDValue AddrHi = 2150 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); 2151 SDValue AddrAdd = 2152 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); 2153 SDValue AddrLo = 2154 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); 2155 2156 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 2157 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 2158 SDValue MNAdd = SDValue( 2159 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), 2160 0); 2161 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); 2162 } 2163 2164 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, 2165 SelectionDAG &DAG) const { 2166 SDLoc DL(N); 2167 EVT Ty = getPointerTy(DAG.getDataLayout()); 2168 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); 2169 const GlobalValue *GV = N->getGlobal(); 2170 2171 // Use a PC-relative addressing mode to access the global dynamic GOT address. 2172 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to 2173 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). 2174 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 2175 SDValue Load = 2176 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); 2177 2178 // Prepare argument list to generate call. 2179 ArgListTy Args; 2180 ArgListEntry Entry; 2181 Entry.Node = Load; 2182 Entry.Ty = CallTy; 2183 Args.push_back(Entry); 2184 2185 // Setup call to __tls_get_addr. 
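// The GOT address computed above is passed as the single pointer-sized
// argument; __tls_get_addr returns the symbol's thread-local address using
// the standard C calling convention.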
2186 TargetLowering::CallLoweringInfo CLI(DAG);
2187 CLI.setDebugLoc(DL)
2188 .setChain(DAG.getEntryNode())
2189 .setLibCallee(CallingConv::C, CallTy,
2190 DAG.getExternalSymbol("__tls_get_addr", Ty),
2191 std::move(Args));
2192
2193 return LowerCallTo(CLI).first;
2194 }
2195
2196 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2197 SelectionDAG &DAG) const {
2198 SDLoc DL(Op);
2199 EVT Ty = Op.getValueType();
2200 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2201 int64_t Offset = N->getOffset();
2202 MVT XLenVT = Subtarget.getXLenVT();
2203
2204 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2205
2206 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2207 CallingConv::GHC)
2208 report_fatal_error("In GHC calling convention TLS is not supported");
2209
2210 SDValue Addr;
2211 switch (Model) {
2212 case TLSModel::LocalExec:
2213 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2214 break;
2215 case TLSModel::InitialExec:
2216 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2217 break;
2218 case TLSModel::LocalDynamic:
2219 case TLSModel::GeneralDynamic:
2220 Addr = getDynamicTLSAddr(N, DAG);
2221 break;
2222 }
2223
2224 // In order to maximise the opportunity for common subexpression elimination,
2225 // emit a separate ADD node for the global address offset instead of folding
2226 // it in the global address node. Later peephole optimisations may choose to
2227 // fold it back in when profitable.
2228 if (Offset != 0)
2229 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2230 DAG.getConstant(Offset, DL, XLenVT));
2231 return Addr;
2232 }
2233
2234 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2235 SDValue CondV = Op.getOperand(0);
2236 SDValue TrueV = Op.getOperand(1);
2237 SDValue FalseV = Op.getOperand(2);
2238 SDLoc DL(Op);
2239 MVT XLenVT = Subtarget.getXLenVT();
2240
2241 // If the result type is XLenVT and CondV is the output of a SETCC node
2242 // which also operated on XLenVT inputs, then merge the SETCC node into the
2243 // lowered RISCVISD::SELECT_CC to take advantage of the integer
2244 // compare+branch instructions. i.e.:
2245 // (select (setcc lhs, rhs, cc), truev, falsev)
2246 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2247 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2248 CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2249 SDValue LHS = CondV.getOperand(0);
2250 SDValue RHS = CondV.getOperand(1);
2251 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2252 ISD::CondCode CCVal = CC->get();
2253
2254 // Special case for a select of 2 constants that have a difference of 1.
2255 // Normally this is done by DAGCombine, but if the select is introduced by
2256 // type legalization or op legalization, we miss it. Restricting to the
2257 // SETLT case for now because that is what signed saturating add/sub need.
2258 // FIXME: We don't need the condition to be SETLT or even a SETCC,
2259 // but we would probably want to swap the true/false values if the condition
2260 // is SETGE/SETLE to avoid an XORI.
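// i.e.:
// (select (setlt lhs, rhs), C+1, C) -> (add (setlt lhs, rhs), C)
// (select (setlt lhs, rhs), C, C+1) -> (sub C+1, (setlt lhs, rhs))
// relying on the XLenVT setcc producing 0 or 1.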
2261 if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) && 2262 CCVal == ISD::SETLT) { 2263 const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue(); 2264 const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue(); 2265 if (TrueVal - 1 == FalseVal) 2266 return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV); 2267 if (TrueVal + 1 == FalseVal) 2268 return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV); 2269 } 2270 2271 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 2272 2273 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT); 2274 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; 2275 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 2276 } 2277 2278 // Otherwise: 2279 // (select condv, truev, falsev) 2280 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) 2281 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 2282 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT); 2283 2284 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; 2285 2286 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 2287 } 2288 2289 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 2290 SDValue CondV = Op.getOperand(1); 2291 SDLoc DL(Op); 2292 MVT XLenVT = Subtarget.getXLenVT(); 2293 2294 if (CondV.getOpcode() == ISD::SETCC && 2295 CondV.getOperand(0).getValueType() == XLenVT) { 2296 SDValue LHS = CondV.getOperand(0); 2297 SDValue RHS = CondV.getOperand(1); 2298 ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get(); 2299 2300 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 2301 2302 SDValue TargetCC = DAG.getCondCode(CCVal); 2303 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 2304 LHS, RHS, TargetCC, Op.getOperand(2)); 2305 } 2306 2307 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 2308 CondV, DAG.getConstant(0, DL, XLenVT), 2309 DAG.getCondCode(ISD::SETNE), Op.getOperand(2)); 2310 } 2311 2312 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2313 MachineFunction &MF = DAG.getMachineFunction(); 2314 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); 2315 2316 SDLoc DL(Op); 2317 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2318 getPointerTy(MF.getDataLayout())); 2319 2320 // vastart just stores the address of the VarArgsFrameIndex slot into the 2321 // memory location argument. 
2322 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2323 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1), 2324 MachinePointerInfo(SV)); 2325 } 2326 2327 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, 2328 SelectionDAG &DAG) const { 2329 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 2330 MachineFunction &MF = DAG.getMachineFunction(); 2331 MachineFrameInfo &MFI = MF.getFrameInfo(); 2332 MFI.setFrameAddressIsTaken(true); 2333 Register FrameReg = RI.getFrameRegister(MF); 2334 int XLenInBytes = Subtarget.getXLen() / 8; 2335 2336 EVT VT = Op.getValueType(); 2337 SDLoc DL(Op); 2338 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); 2339 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2340 while (Depth--) { 2341 int Offset = -(XLenInBytes * 2); 2342 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, 2343 DAG.getIntPtrConstant(Offset, DL)); 2344 FrameAddr = 2345 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 2346 } 2347 return FrameAddr; 2348 } 2349 2350 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, 2351 SelectionDAG &DAG) const { 2352 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 2353 MachineFunction &MF = DAG.getMachineFunction(); 2354 MachineFrameInfo &MFI = MF.getFrameInfo(); 2355 MFI.setReturnAddressIsTaken(true); 2356 MVT XLenVT = Subtarget.getXLenVT(); 2357 int XLenInBytes = Subtarget.getXLen() / 8; 2358 2359 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 2360 return SDValue(); 2361 2362 EVT VT = Op.getValueType(); 2363 SDLoc DL(Op); 2364 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2365 if (Depth) { 2366 int Off = -XLenInBytes; 2367 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); 2368 SDValue Offset = DAG.getConstant(Off, DL, VT); 2369 return DAG.getLoad(VT, DL, DAG.getEntryNode(), 2370 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), 2371 MachinePointerInfo()); 2372 } 2373 2374 // Return the value of the return address register, marking it an implicit 2375 // live-in. 
2376 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT)); 2377 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); 2378 } 2379 2380 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op, 2381 SelectionDAG &DAG) const { 2382 SDLoc DL(Op); 2383 SDValue Lo = Op.getOperand(0); 2384 SDValue Hi = Op.getOperand(1); 2385 SDValue Shamt = Op.getOperand(2); 2386 EVT VT = Lo.getValueType(); 2387 2388 // if Shamt-XLEN < 0: // Shamt < XLEN 2389 // Lo = Lo << Shamt 2390 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt)) 2391 // else: 2392 // Lo = 0 2393 // Hi = Lo << (Shamt-XLEN) 2394 2395 SDValue Zero = DAG.getConstant(0, DL, VT); 2396 SDValue One = DAG.getConstant(1, DL, VT); 2397 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 2398 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 2399 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 2400 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 2401 2402 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); 2403 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One); 2404 SDValue ShiftRightLo = 2405 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt); 2406 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt); 2407 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo); 2408 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen); 2409 2410 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 2411 2412 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero); 2413 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 2414 2415 SDValue Parts[2] = {Lo, Hi}; 2416 return DAG.getMergeValues(Parts, DL); 2417 } 2418 2419 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, 2420 bool IsSRA) const { 2421 SDLoc DL(Op); 2422 SDValue Lo = Op.getOperand(0); 2423 SDValue Hi = Op.getOperand(1); 2424 SDValue Shamt = Op.getOperand(2); 2425 EVT VT = Lo.getValueType(); 2426 2427 // SRA expansion: 2428 // if Shamt-XLEN < 0: // Shamt < XLEN 2429 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 2430 // Hi = Hi >>s Shamt 2431 // else: 2432 // Lo = Hi >>s (Shamt-XLEN); 2433 // Hi = Hi >>s (XLEN-1) 2434 // 2435 // SRL expansion: 2436 // if Shamt-XLEN < 0: // Shamt < XLEN 2437 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 2438 // Hi = Hi >>u Shamt 2439 // else: 2440 // Lo = Hi >>u (Shamt-XLEN); 2441 // Hi = 0; 2442 2443 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; 2444 2445 SDValue Zero = DAG.getConstant(0, DL, VT); 2446 SDValue One = DAG.getConstant(1, DL, VT); 2447 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 2448 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 2449 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 2450 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 2451 2452 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt); 2453 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One); 2454 SDValue ShiftLeftHi = 2455 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt); 2456 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi); 2457 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); 2458 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen); 2459 SDValue HiFalse = 2460 IsSRA ? 
DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero; 2461 2462 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 2463 2464 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse); 2465 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 2466 2467 SDValue Parts[2] = {Lo, Hi}; 2468 return DAG.getMergeValues(Parts, DL); 2469 } 2470 2471 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is 2472 // illegal (currently only vXi64 RV32). 2473 // FIXME: We could also catch non-constant sign-extended i32 values and lower 2474 // them to SPLAT_VECTOR_I64 2475 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op, 2476 SelectionDAG &DAG) const { 2477 SDLoc DL(Op); 2478 EVT VecVT = Op.getValueType(); 2479 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && 2480 "Unexpected SPLAT_VECTOR_PARTS lowering"); 2481 2482 assert(Op.getNumOperands() == 2 && "Unexpected number of operands!"); 2483 SDValue Lo = Op.getOperand(0); 2484 SDValue Hi = Op.getOperand(1); 2485 2486 if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { 2487 int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); 2488 int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); 2489 // If Hi constant is all the same sign bit as Lo, lower this as a custom 2490 // node in order to try and match RVV vector/scalar instructions. 2491 if ((LoC >> 31) == HiC) 2492 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 2493 } 2494 2495 // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not 2496 // to accidentally sign-extend the 32-bit halves to the e64 SEW: 2497 // vmv.v.x vX, hi 2498 // vsll.vx vX, vX, /*32*/ 2499 // vmv.v.x vY, lo 2500 // vsll.vx vY, vY, /*32*/ 2501 // vsrl.vx vY, vY, /*32*/ 2502 // vor.vv vX, vX, vY 2503 SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT); 2504 2505 Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 2506 Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV); 2507 Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV); 2508 2509 if (isNullConstant(Hi)) 2510 return Lo; 2511 2512 Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi); 2513 Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV); 2514 2515 return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi); 2516 } 2517 2518 // Custom-lower extensions from mask vectors by using a vselect either with 1 2519 // for zero/any-extension or -1 for sign-extension: 2520 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0) 2521 // Note that any-extension is lowered identically to zero-extension. 2522 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG, 2523 int64_t ExtTrueVal) const { 2524 SDLoc DL(Op); 2525 MVT VecVT = Op.getSimpleValueType(); 2526 SDValue Src = Op.getOperand(0); 2527 // Only custom-lower extensions from mask types 2528 assert(Src.getValueType().isVector() && 2529 Src.getValueType().getVectorElementType() == MVT::i1); 2530 2531 MVT XLenVT = Subtarget.getXLenVT(); 2532 SDValue SplatZero = DAG.getConstant(0, DL, XLenVT); 2533 SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT); 2534 2535 if (VecVT.isScalableVector()) { 2536 // Be careful not to introduce illegal scalar types at this stage, and be 2537 // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is 2538 // illegal and must be expanded. Since we know that the constants are 2539 // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly. 
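// SPLAT_VECTOR_I64 sign-extends its 32-bit scalar operand to each 64-bit
// element, which is exact for the 0, 1 and -1 values used here.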
2540 bool IsRV32E64 = 2541 !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64; 2542 2543 if (!IsRV32E64) { 2544 SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); 2545 SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); 2546 } else { 2547 SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero); 2548 SplatTrueVal = 2549 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal); 2550 } 2551 2552 return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); 2553 } 2554 2555 MVT ContainerVT = getContainerForFixedLengthVector(VecVT); 2556 MVT I1ContainerVT = 2557 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2558 2559 SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget); 2560 2561 SDValue Mask, VL; 2562 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2563 2564 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); 2565 SplatTrueVal = 2566 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); 2567 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, 2568 SplatTrueVal, SplatZero, VL); 2569 2570 return convertFromScalableVector(VecVT, Select, DAG, Subtarget); 2571 } 2572 2573 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV( 2574 SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const { 2575 MVT ExtVT = Op.getSimpleValueType(); 2576 // Only custom-lower extensions from fixed-length vector types. 2577 if (!ExtVT.isFixedLengthVector()) 2578 return Op; 2579 MVT VT = Op.getOperand(0).getSimpleValueType(); 2580 // Grab the canonical container type for the extended type. Infer the smaller 2581 // type from that to ensure the same number of vector elements, as we know 2582 // the LMUL will be sufficient to hold the smaller type. 2583 MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT); 2584 // Get the extended container type manually to ensure the same number of 2585 // vector elements between source and dest. 2586 MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 2587 ContainerExtVT.getVectorElementCount()); 2588 2589 SDValue Op1 = 2590 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 2591 2592 SDLoc DL(Op); 2593 SDValue Mask, VL; 2594 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2595 2596 SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL); 2597 2598 return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget); 2599 } 2600 2601 // Custom-lower truncations from vectors to mask vectors by using a mask and a 2602 // setcc operation: 2603 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne) 2604 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op, 2605 SelectionDAG &DAG) const { 2606 SDLoc DL(Op); 2607 EVT MaskVT = Op.getValueType(); 2608 // Only expect to custom-lower truncations to mask types 2609 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 && 2610 "Unexpected type for vector mask lowering"); 2611 SDValue Src = Op.getOperand(0); 2612 MVT VecVT = Src.getSimpleValueType(); 2613 2614 // If this is a fixed vector, we need to convert it to a scalable vector. 
2615 MVT ContainerVT = VecVT; 2616 if (VecVT.isFixedLengthVector()) { 2617 ContainerVT = getContainerForFixedLengthVector(VecVT); 2618 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 2619 } 2620 2621 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); 2622 SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 2623 2624 SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); 2625 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); 2626 2627 if (VecVT.isScalableVector()) { 2628 SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); 2629 return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE); 2630 } 2631 2632 SDValue Mask, VL; 2633 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2634 2635 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); 2636 SDValue Trunc = 2637 DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL); 2638 Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero, 2639 DAG.getCondCode(ISD::SETNE), Mask, VL); 2640 return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget); 2641 } 2642 2643 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the 2644 // first position of a vector, and that vector is slid up to the insert index. 2645 // By limiting the active vector length to index+1 and merging with the 2646 // original vector (with an undisturbed tail policy for elements >= VL), we 2647 // achieve the desired result of leaving all elements untouched except the one 2648 // at VL-1, which is replaced with the desired value. 2649 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 2650 SelectionDAG &DAG) const { 2651 SDLoc DL(Op); 2652 MVT VecVT = Op.getSimpleValueType(); 2653 SDValue Vec = Op.getOperand(0); 2654 SDValue Val = Op.getOperand(1); 2655 SDValue Idx = Op.getOperand(2); 2656 2657 MVT ContainerVT = VecVT; 2658 // If the operand is a fixed-length vector, convert to a scalable one. 2659 if (VecVT.isFixedLengthVector()) { 2660 ContainerVT = getContainerForFixedLengthVector(VecVT); 2661 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2662 } 2663 2664 MVT XLenVT = Subtarget.getXLenVT(); 2665 2666 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 2667 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64; 2668 // Even i64-element vectors on RV32 can be lowered without scalar 2669 // legalization if the most-significant 32 bits of the value are not affected 2670 // by the sign-extension of the lower 32 bits. 2671 // TODO: We could also catch sign extensions of a 32-bit value. 2672 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) { 2673 const auto *CVal = cast<ConstantSDNode>(Val); 2674 if (isInt<32>(CVal->getSExtValue())) { 2675 IsLegalInsert = true; 2676 Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); 2677 } 2678 } 2679 2680 SDValue Mask, VL; 2681 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2682 2683 SDValue ValInVec; 2684 2685 if (IsLegalInsert) { 2686 unsigned Opc = 2687 VecVT.isFloatingPoint() ? 
RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL; 2688 if (isNullConstant(Idx)) { 2689 Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL); 2690 if (!VecVT.isFixedLengthVector()) 2691 return Vec; 2692 return convertFromScalableVector(VecVT, Vec, DAG, Subtarget); 2693 } 2694 ValInVec = 2695 DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL); 2696 } else { 2697 // On RV32, i64-element vectors must be specially handled to place the 2698 // value at element 0, by using two vslide1up instructions in sequence on 2699 // the i32 split lo/hi value. Use an equivalently-sized i32 vector for 2700 // this. 2701 SDValue One = DAG.getConstant(1, DL, XLenVT); 2702 SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero); 2703 SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One); 2704 MVT I32ContainerVT = 2705 MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2); 2706 SDValue I32Mask = 2707 getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first; 2708 // Limit the active VL to two. 2709 SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT); 2710 // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied 2711 // undef doesn't obey the earlyclobber constraint. Just splat a zero value. 2712 ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, 2713 InsertI64VL); 2714 // First slide in the hi value, then the lo in underneath it. 2715 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 2716 ValHi, I32Mask, InsertI64VL); 2717 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 2718 ValLo, I32Mask, InsertI64VL); 2719 // Bitcast back to the right container type. 2720 ValInVec = DAG.getBitcast(ContainerVT, ValInVec); 2721 } 2722 2723 // Now that the value is in a vector, slide it into position. 2724 SDValue InsertVL = 2725 DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT)); 2726 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 2727 ValInVec, Idx, Mask, InsertVL); 2728 if (!VecVT.isFixedLengthVector()) 2729 return Slideup; 2730 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 2731 } 2732 2733 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then 2734 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer 2735 // types this is done using VMV_X_S to allow us to glean information about the 2736 // sign bits of the result. 2737 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 2738 SelectionDAG &DAG) const { 2739 SDLoc DL(Op); 2740 SDValue Idx = Op.getOperand(1); 2741 SDValue Vec = Op.getOperand(0); 2742 EVT EltVT = Op.getValueType(); 2743 MVT VecVT = Vec.getSimpleValueType(); 2744 MVT XLenVT = Subtarget.getXLenVT(); 2745 2746 if (VecVT.getVectorElementType() == MVT::i1) { 2747 // FIXME: For now we just promote to an i8 vector and extract from that, 2748 // but this is probably not optimal. 2749 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 2750 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 2751 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx); 2752 } 2753 2754 // If this is a fixed vector, we need to convert it to a scalable vector. 
2755 MVT ContainerVT = VecVT; 2756 if (VecVT.isFixedLengthVector()) { 2757 ContainerVT = getContainerForFixedLengthVector(VecVT); 2758 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2759 } 2760 2761 // If the index is 0, the vector is already in the right position. 2762 if (!isNullConstant(Idx)) { 2763 // Use a VL of 1 to avoid processing more elements than we need. 2764 SDValue VL = DAG.getConstant(1, DL, XLenVT); 2765 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2766 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2767 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2768 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 2769 } 2770 2771 if (!EltVT.isInteger()) { 2772 // Floating-point extracts are handled in TableGen. 2773 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, 2774 DAG.getConstant(0, DL, XLenVT)); 2775 } 2776 2777 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 2778 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); 2779 } 2780 2781 // Called by type legalization to handle splat of i64 on RV32. 2782 // FIXME: We can optimize this when the type has sign or zero bits in one 2783 // of the halves. 2784 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, 2785 SDValue VL, SelectionDAG &DAG) { 2786 SDValue ThirtyTwoV = DAG.getConstant(32, DL, VT); 2787 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 2788 DAG.getConstant(0, DL, MVT::i32)); 2789 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 2790 DAG.getConstant(1, DL, MVT::i32)); 2791 2792 // vmv.v.x vX, hi 2793 // vsll.vx vX, vX, /*32*/ 2794 // vmv.v.x vY, lo 2795 // vsll.vx vY, vY, /*32*/ 2796 // vsrl.vx vY, vY, /*32*/ 2797 // vor.vv vX, vX, vY 2798 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount()); 2799 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2800 Lo = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); 2801 Lo = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL); 2802 Lo = DAG.getNode(RISCVISD::SRL_VL, DL, VT, Lo, ThirtyTwoV, Mask, VL); 2803 2804 Hi = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Hi, VL); 2805 Hi = DAG.getNode(RISCVISD::SHL_VL, DL, VT, Hi, ThirtyTwoV, Mask, VL); 2806 2807 return DAG.getNode(RISCVISD::OR_VL, DL, VT, Lo, Hi, Mask, VL); 2808 } 2809 2810 // Some RVV intrinsics may claim that they want an integer operand to be 2811 // promoted or expanded. 2812 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG, 2813 const RISCVSubtarget &Subtarget) { 2814 assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2815 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) && 2816 "Unexpected opcode"); 2817 2818 if (!Subtarget.hasStdExtV()) 2819 return SDValue(); 2820 2821 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; 2822 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0); 2823 SDLoc DL(Op); 2824 2825 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = 2826 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo); 2827 if (!II || !II->SplatOperand) 2828 return SDValue(); 2829 2830 unsigned SplatOp = II->SplatOperand + HasChain; 2831 assert(SplatOp < Op.getNumOperands()); 2832 2833 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); 2834 SDValue &ScalarOp = Operands[SplatOp]; 2835 MVT OpVT = ScalarOp.getSimpleValueType(); 2836 MVT XLenVT = Subtarget.getXLenVT(); 2837 2838 // If this isn't a scalar, or its type is XLenVT we're done. 
2839 if (!OpVT.isScalarInteger() || OpVT == XLenVT)
2840 return SDValue();
2841
2842 // Simplest case is that the operand needs to be promoted to XLenVT.
2843 if (OpVT.bitsLT(XLenVT)) {
2844 // If the operand is a constant, sign extend to increase our chances
2845 // of being able to use a .vi instruction. ANY_EXTEND would become a
2846 // zero extend and the simm5 check in isel would fail.
2847 // FIXME: Should we ignore the upper bits in isel instead?
2848 unsigned ExtOpc =
2849 isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2850 ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
2851 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2852 }
2853
2854 // Use the previous operand to get the vXi64 VT. The result might be a mask
2855 // VT for compares. Using the previous operand assumes that the previous
2856 // operand will never have a smaller element size than a scalar operand and
2857 // that a widening operation never uses SEW=64.
2858 // NOTE: If the assert below fails, we can probably just find the
2859 // element count from any operand or result and use it to construct the VT.
2860 assert(II->SplatOperand > 1 && "Unexpected splat operand!");
2861 MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
2862
2863 // The more complex case is when the scalar is larger than XLenVT.
2864 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
2865 VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
2866
2867 // If this is a sign-extended 32-bit constant, we can truncate it and rely
2868 // on the instruction to sign-extend since SEW>XLEN.
2869 if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
2870 if (isInt<32>(CVal->getSExtValue())) {
2871 ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
2872 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2873 }
2874 }
2875
2876 // We need to convert the scalar to a splat vector.
2877 // FIXME: Can we implicitly truncate the scalar if it is known to
2878 // be sign extended?
2879 // VL should be the last operand.
2880 SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
2881 assert(VL.getValueType() == XLenVT);
2882 ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
2883 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
2884 }
2885
2886 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
2887 SelectionDAG &DAG) const {
2888 unsigned IntNo = Op.getConstantOperandVal(0);
2889 SDLoc DL(Op);
2890 MVT XLenVT = Subtarget.getXLenVT();
2891
2892 switch (IntNo) {
2893 default:
2894 break; // Don't custom lower most intrinsics.
2895 case Intrinsic::thread_pointer: {
2896 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2897 return DAG.getRegister(RISCV::X4, PtrVT);
2898 }
2899 case Intrinsic::riscv_orc_b:
2900 // Lower to the GORCI encoding for orc.b.
2901 return DAG.getNode(RISCVISD::GORCI, DL, XLenVT, Op.getOperand(1),
2902 DAG.getTargetConstant(7, DL, XLenVT));
2903 case Intrinsic::riscv_vmv_x_s:
2904 assert(Op.getValueType() == XLenVT && "Unexpected VT!");
2905 return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
2906 Op.getOperand(1));
2907 case Intrinsic::riscv_vmv_v_x: {
2908 SDValue Scalar = Op.getOperand(1);
2909 if (Scalar.getValueType().bitsLE(XLenVT)) {
2910 unsigned ExtOpc =
2911 isa<ConstantSDNode>(Scalar) ?
ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2912 Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2913 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), Scalar,
2914 Op.getOperand(2));
2915 }
2916
2917 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
2918
2919 // If this is a sign-extended 32-bit constant, we can truncate it and rely
2920 // on the instruction to sign-extend since SEW>XLEN.
2921 if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) {
2922 if (isInt<32>(CVal->getSExtValue()))
2923 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(),
2924 DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32),
2925 Op.getOperand(2));
2926 }
2927
2928 // Otherwise use the more complicated splatting algorithm.
2929 return splatSplitI64WithVL(DL, Op.getSimpleValueType(), Scalar,
2930 Op.getOperand(2), DAG);
2931 }
2932 case Intrinsic::riscv_vfmv_v_f:
2933 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
2934 Op.getOperand(1), Op.getOperand(2));
2935 case Intrinsic::riscv_vmv_s_x: {
2936 SDValue Scalar = Op.getOperand(2);
2937
2938 if (Scalar.getValueType().bitsLE(XLenVT)) {
2939 Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
2940 return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
2941 Op.getOperand(1), Scalar, Op.getOperand(3));
2942 }
2943
2944 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
2945
2946 // This is an i64 value that lives in two scalar registers. We have to
2947 // insert this in a convoluted way. First we build a vXi64 splat containing
2948 // the two values that we assemble using some bit math. Next we'll use
2949 // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
2950 // to merge element 0 from our splat into the source vector.
2951 // FIXME: This is probably not the best way to do this, but it is
2952 // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
2953 // point.
2954 // vmv.v.x vX, hi
2955 // vsll.vx vX, vX, /*32*/
2956 // vmv.v.x vY, lo
2957 // vsll.vx vY, vY, /*32*/
2958 // vsrl.vx vY, vY, /*32*/
2959 // vor.vv vX, vX, vY
2960 //
2961 // vid.v vVid
2962 // vmseq.vx mMask, vVid, 0
2963 // vmerge.vvm vDest, vSrc, vVal, mMask
2964 MVT VT = Op.getSimpleValueType();
2965 SDValue Vec = Op.getOperand(1);
2966 SDValue VL = Op.getOperand(3);
2967
2968 SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2969 SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
2970 DAG.getConstant(0, DL, MVT::i32), VL);
2971
2972 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
2973 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2974 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
2975 SDValue SelectCond =
2976 DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
2977 DAG.getCondCode(ISD::SETEQ), Mask, VL);
2978 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
2979 Vec, VL);
2980 }
2981 case Intrinsic::riscv_vslide1up:
2982 case Intrinsic::riscv_vslide1down:
2983 case Intrinsic::riscv_vslide1up_mask:
2984 case Intrinsic::riscv_vslide1down_mask: {
2985 // We need to special case these when the scalar is larger than XLen.
2986 unsigned NumOps = Op.getNumOperands();
2987 bool IsMasked = NumOps == 6;
2988 unsigned OpOffset = IsMasked ? 1 : 0;
2989 SDValue Scalar = Op.getOperand(2 + OpOffset);
2990 if (Scalar.getValueType().bitsLE(XLenVT))
2991 break;
2992
2993 // Splatting a sign-extended constant is fine.
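// (e.g. a scalar constant of -1 or 123 is representable in a sign-extended
// 32 bits, so it can take the ordinary vslide1up/vslide1down.vx path: the
// instruction itself sign-extends the XLEN-wide source to SEW=64.)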
2994 if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) 2995 if (isInt<32>(CVal->getSExtValue())) 2996 break; 2997 2998 MVT VT = Op.getSimpleValueType(); 2999 assert(VT.getVectorElementType() == MVT::i64 && 3000 Scalar.getValueType() == MVT::i64 && "Unexpected VTs"); 3001 3002 // Convert the vector source to the equivalent nxvXi32 vector. 3003 MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); 3004 SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset)); 3005 3006 SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 3007 DAG.getConstant(0, DL, XLenVT)); 3008 SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 3009 DAG.getConstant(1, DL, XLenVT)); 3010 3011 // Double the VL since we halved SEW. 3012 SDValue VL = Op.getOperand(NumOps - 1); 3013 SDValue I32VL = 3014 DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT)); 3015 3016 MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount()); 3017 SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL); 3018 3019 // Shift the two scalar parts in using SEW=32 slide1up/slide1down 3020 // instructions. 3021 if (IntNo == Intrinsic::riscv_vslide1up || 3022 IntNo == Intrinsic::riscv_vslide1up_mask) { 3023 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi, 3024 I32Mask, I32VL); 3025 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo, 3026 I32Mask, I32VL); 3027 } else { 3028 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo, 3029 I32Mask, I32VL); 3030 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi, 3031 I32Mask, I32VL); 3032 } 3033 3034 // Convert back to nxvXi64. 3035 Vec = DAG.getBitcast(VT, Vec); 3036 3037 if (!IsMasked) 3038 return Vec; 3039 3040 // Apply mask after the operation. 
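// (Conceptually this is a vmerge over the unmasked result computed above:
// lanes where the mask is clear keep the maskedoff operand, lanes where it
// is set take the freshly slid-in values.)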
3041 SDValue Mask = Op.getOperand(NumOps - 2); 3042 SDValue MaskedOff = Op.getOperand(1); 3043 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL); 3044 } 3045 } 3046 3047 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 3048 } 3049 3050 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 3051 SelectionDAG &DAG) const { 3052 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 3053 } 3054 3055 static MVT getLMUL1VT(MVT VT) { 3056 assert(VT.getVectorElementType().getSizeInBits() <= 64 && 3057 "Unexpected vector MVT"); 3058 return MVT::getScalableVectorVT( 3059 VT.getVectorElementType(), 3060 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); 3061 } 3062 3063 static unsigned getRVVReductionOp(unsigned ISDOpcode) { 3064 switch (ISDOpcode) { 3065 default: 3066 llvm_unreachable("Unhandled reduction"); 3067 case ISD::VECREDUCE_ADD: 3068 return RISCVISD::VECREDUCE_ADD_VL; 3069 case ISD::VECREDUCE_UMAX: 3070 return RISCVISD::VECREDUCE_UMAX_VL; 3071 case ISD::VECREDUCE_SMAX: 3072 return RISCVISD::VECREDUCE_SMAX_VL; 3073 case ISD::VECREDUCE_UMIN: 3074 return RISCVISD::VECREDUCE_UMIN_VL; 3075 case ISD::VECREDUCE_SMIN: 3076 return RISCVISD::VECREDUCE_SMIN_VL; 3077 case ISD::VECREDUCE_AND: 3078 return RISCVISD::VECREDUCE_AND_VL; 3079 case ISD::VECREDUCE_OR: 3080 return RISCVISD::VECREDUCE_OR_VL; 3081 case ISD::VECREDUCE_XOR: 3082 return RISCVISD::VECREDUCE_XOR_VL; 3083 } 3084 } 3085 3086 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op, 3087 SelectionDAG &DAG) const { 3088 SDLoc DL(Op); 3089 SDValue Vec = Op.getOperand(0); 3090 MVT VecVT = Vec.getSimpleValueType(); 3091 assert((Op.getOpcode() == ISD::VECREDUCE_AND || 3092 Op.getOpcode() == ISD::VECREDUCE_OR || 3093 Op.getOpcode() == ISD::VECREDUCE_XOR) && 3094 "Unexpected reduction lowering"); 3095 3096 MVT XLenVT = Subtarget.getXLenVT(); 3097 assert(Op.getValueType() == XLenVT && 3098 "Expected reduction output to be legalized to XLenVT"); 3099 3100 MVT ContainerVT = VecVT; 3101 if (VecVT.isFixedLengthVector()) { 3102 ContainerVT = getContainerForFixedLengthVector(VecVT); 3103 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3104 } 3105 3106 SDValue Mask, VL; 3107 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3108 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 3109 3110 switch (Op.getOpcode()) { 3111 default: 3112 llvm_unreachable("Unhandled reduction"); 3113 case ISD::VECREDUCE_AND: 3114 // vpopc ~x == 0 3115 Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL); 3116 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3117 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ); 3118 case ISD::VECREDUCE_OR: 3119 // vpopc x != 0 3120 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3121 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE); 3122 case ISD::VECREDUCE_XOR: { 3123 // ((vpopc x) & 1) != 0 3124 SDValue One = DAG.getConstant(1, DL, XLenVT); 3125 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3126 Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One); 3127 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE); 3128 } 3129 } 3130 } 3131 3132 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, 3133 SelectionDAG &DAG) const { 3134 SDLoc DL(Op); 3135 SDValue Vec = Op.getOperand(0); 3136 EVT VecEVT = Vec.getValueType(); 3137 3138 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode()); 3139 3140 // Due to ordering in legalize types we may have a 
vector type that needs to 3141 // be split. Do that manually so we can get down to a legal type. 3142 while (getTypeAction(*DAG.getContext(), VecEVT) == 3143 TargetLowering::TypeSplitVector) { 3144 SDValue Lo, Hi; 3145 std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL); 3146 VecEVT = Lo.getValueType(); 3147 Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi); 3148 } 3149 3150 // TODO: The type may need to be widened rather than split. Or widened before 3151 // it can be split. 3152 if (!isTypeLegal(VecEVT)) 3153 return SDValue(); 3154 3155 MVT VecVT = VecEVT.getSimpleVT(); 3156 MVT VecEltVT = VecVT.getVectorElementType(); 3157 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode()); 3158 3159 MVT ContainerVT = VecVT; 3160 if (VecVT.isFixedLengthVector()) { 3161 ContainerVT = getContainerForFixedLengthVector(VecVT); 3162 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3163 } 3164 3165 MVT M1VT = getLMUL1VT(ContainerVT); 3166 3167 SDValue Mask, VL; 3168 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3169 3170 // FIXME: This is a VLMAX splat which might be too large and can prevent 3171 // vsetvli removal. 3172 SDValue NeutralElem = 3173 DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags()); 3174 SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem); 3175 SDValue Reduction = 3176 DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL); 3177 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 3178 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 3179 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 3180 } 3181 3182 // Given a reduction op, this function returns the matching reduction opcode, 3183 // the vector SDValue and the scalar SDValue required to lower this to a 3184 // RISCVISD node. 3185 static std::tuple<unsigned, SDValue, SDValue> 3186 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { 3187 SDLoc DL(Op); 3188 switch (Op.getOpcode()) { 3189 default: 3190 llvm_unreachable("Unhandled reduction"); 3191 case ISD::VECREDUCE_FADD: 3192 return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), 3193 DAG.getConstantFP(0.0, DL, EltVT)); 3194 case ISD::VECREDUCE_SEQ_FADD: 3195 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1), 3196 Op.getOperand(0)); 3197 } 3198 } 3199 3200 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, 3201 SelectionDAG &DAG) const { 3202 SDLoc DL(Op); 3203 MVT VecEltVT = Op.getSimpleValueType(); 3204 3205 unsigned RVVOpcode; 3206 SDValue VectorVal, ScalarVal; 3207 std::tie(RVVOpcode, VectorVal, ScalarVal) = 3208 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); 3209 MVT VecVT = VectorVal.getSimpleValueType(); 3210 3211 MVT ContainerVT = VecVT; 3212 if (VecVT.isFixedLengthVector()) { 3213 ContainerVT = getContainerForFixedLengthVector(VecVT); 3214 VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget); 3215 } 3216 3217 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); 3218 3219 SDValue Mask, VL; 3220 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3221 3222 // FIXME: This is a VLMAX splat which might be too large and can prevent 3223 // vsetvli removal. 
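// As a rough sketch (register names illustrative), a vecreduce.fadd of an
// nxv2f32 value is expected to come out as something like:
//   vfmv.v.f    v25, fa0      ; LMUL=1 splat of the start/neutral value
//   vfredsum.vs v25, v8, v25  ; element 0 of v25 = reduce(v8) + v25[0]
//   vfmv.f.s    fa0, v25      ; read the scalar result back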
3224 SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3225 SDValue Reduction =
3226 DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3227 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3228 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3229 }
3230
3231 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3232 SelectionDAG &DAG) const {
3233 SDValue Vec = Op.getOperand(0);
3234 SDValue SubVec = Op.getOperand(1);
3235 MVT VecVT = Vec.getSimpleValueType();
3236 MVT SubVecVT = SubVec.getSimpleValueType();
3237
3238 SDLoc DL(Op);
3239 MVT XLenVT = Subtarget.getXLenVT();
3240 unsigned OrigIdx = Op.getConstantOperandVal(2);
3241 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3242
3243 // We don't have the ability to slide mask vectors up indexed by their i1
3244 // elements; the smallest we can do is i8. Often we are able to bitcast to
3245 // equivalent i8 vectors. Note that when inserting a fixed-length vector
3246 // into a scalable one, we might not necessarily have enough scalable
3247 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3248 if (SubVecVT.getVectorElementType() == MVT::i1 &&
3249 (OrigIdx != 0 || !Vec.isUndef())) {
3250 if (VecVT.getVectorMinNumElements() >= 8 &&
3251 SubVecVT.getVectorMinNumElements() >= 8) {
3252 assert(OrigIdx % 8 == 0 && "Invalid index");
3253 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3254 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3255 "Unexpected mask vector lowering");
3256 OrigIdx /= 8;
3257 SubVecVT =
3258 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3259 SubVecVT.isScalableVector());
3260 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3261 VecVT.isScalableVector());
3262 Vec = DAG.getBitcast(VecVT, Vec);
3263 SubVec = DAG.getBitcast(SubVecVT, SubVec);
3264 } else {
3265 // We can't slide this mask vector up indexed by its i1 elements.
3266 // This poses a problem when we wish to insert a scalable vector which
3267 // can't be re-expressed as a larger type. Just choose the slow path and
3268 // extend to a larger type, then truncate back down.
3269 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3270 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3271 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3272 SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3273 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3274 Op.getOperand(2));
3275 SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3276 return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3277 }
3278 }
3279
3280 // If the subvector is a fixed-length type, we cannot use subregister
3281 // manipulation to simplify the codegen; we don't know which register of an
3282 // LMUL group contains the specific subvector as we only know the minimum
3283 // register size. Therefore we must slide the vector group up the full
3284 // amount.
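// For example (a sketch, assuming a legal fixed-length configuration),
// inserting a v2i32 subvector at index 2 of a v4i32 vector boils down to
// setting VL to OrigIdx + 2 = 4 at SEW=32 and issuing something like
//   vslideup.vi vDst, vSub, 2
// so that elements 0..1 of vDst are left undisturbed.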
3285 if (SubVecVT.isFixedLengthVector()) { 3286 if (OrigIdx == 0 && Vec.isUndef()) 3287 return Op; 3288 MVT ContainerVT = VecVT; 3289 if (VecVT.isFixedLengthVector()) { 3290 ContainerVT = getContainerForFixedLengthVector(VecVT); 3291 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3292 } 3293 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, 3294 DAG.getUNDEF(ContainerVT), SubVec, 3295 DAG.getConstant(0, DL, XLenVT)); 3296 SDValue Mask = 3297 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 3298 // Set the vector length to only the number of elements we care about. Note 3299 // that for slideup this includes the offset. 3300 SDValue VL = 3301 DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); 3302 SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 3303 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 3304 SubVec, SlideupAmt, Mask, VL); 3305 if (VecVT.isFixedLengthVector()) 3306 Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 3307 return DAG.getBitcast(Op.getValueType(), Slideup); 3308 } 3309 3310 unsigned SubRegIdx, RemIdx; 3311 std::tie(SubRegIdx, RemIdx) = 3312 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 3313 VecVT, SubVecVT, OrigIdx, TRI); 3314 3315 RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); 3316 bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 || 3317 SubVecLMUL == RISCVVLMUL::LMUL_F4 || 3318 SubVecLMUL == RISCVVLMUL::LMUL_F8; 3319 3320 // 1. If the Idx has been completely eliminated and this subvector's size is 3321 // a vector register or a multiple thereof, or the surrounding elements are 3322 // undef, then this is a subvector insert which naturally aligns to a vector 3323 // register. These can easily be handled using subregister manipulation. 3324 // 2. If the subvector is smaller than a vector register, then the insertion 3325 // must preserve the undisturbed elements of the register. We do this by 3326 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type 3327 // (which resolves to a subregister copy), performing a VSLIDEUP to place the 3328 // subvector within the vector register, and an INSERT_SUBVECTOR of that 3329 // LMUL=1 type back into the larger vector (resolving to another subregister 3330 // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type 3331 // to avoid allocating a large register group to hold our subvector. 3332 if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) 3333 return Op; 3334 3335 // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements 3336 // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy 3337 // (in our case undisturbed). This means we can set up a subvector insertion 3338 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the 3339 // size of the subvector. 3340 MVT InterSubVT = VecVT; 3341 SDValue AlignedExtract = Vec; 3342 unsigned AlignedIdx = OrigIdx - RemIdx; 3343 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 3344 InterSubVT = getLMUL1VT(VecVT); 3345 // Extract a subvector equal to the nearest full vector register type. This 3346 // should resolve to a EXTRACT_SUBREG instruction. 3347 AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 3348 DAG.getConstant(AlignedIdx, DL, XLenVT)); 3349 } 3350 3351 SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); 3352 // For scalable vectors this must be further multiplied by vscale. 
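// (e.g. if RemIdx is 2 the runtime offset is 2 * vscale elements, since
// scalable-vector subvector indices are expressed in multiples of the
// minimum element count.)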
3353 SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); 3354 3355 SDValue Mask, VL; 3356 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 3357 3358 // Construct the vector length corresponding to RemIdx + length(SubVecVT). 3359 VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); 3360 VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); 3361 VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); 3362 3363 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, 3364 DAG.getUNDEF(InterSubVT), SubVec, 3365 DAG.getConstant(0, DL, XLenVT)); 3366 3367 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, 3368 AlignedExtract, SubVec, SlideupAmt, Mask, VL); 3369 3370 // If required, insert this subvector back into the correct vector register. 3371 // This should resolve to an INSERT_SUBREG instruction. 3372 if (VecVT.bitsGT(InterSubVT)) 3373 Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, 3374 DAG.getConstant(AlignedIdx, DL, XLenVT)); 3375 3376 // We might have bitcast from a mask type: cast back to the original type if 3377 // required. 3378 return DAG.getBitcast(Op.getSimpleValueType(), Slideup); 3379 } 3380 3381 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, 3382 SelectionDAG &DAG) const { 3383 SDValue Vec = Op.getOperand(0); 3384 MVT SubVecVT = Op.getSimpleValueType(); 3385 MVT VecVT = Vec.getSimpleValueType(); 3386 3387 SDLoc DL(Op); 3388 MVT XLenVT = Subtarget.getXLenVT(); 3389 unsigned OrigIdx = Op.getConstantOperandVal(1); 3390 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 3391 3392 // We don't have the ability to slide mask vectors down indexed by their i1 3393 // elements; the smallest we can do is i8. Often we are able to bitcast to 3394 // equivalent i8 vectors. Note that when extracting a fixed-length vector 3395 // from a scalable one, we might not necessarily have enough scalable 3396 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. 3397 if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { 3398 if (VecVT.getVectorMinNumElements() >= 8 && 3399 SubVecVT.getVectorMinNumElements() >= 8) { 3400 assert(OrigIdx % 8 == 0 && "Invalid index"); 3401 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 3402 SubVecVT.getVectorMinNumElements() % 8 == 0 && 3403 "Unexpected mask vector lowering"); 3404 OrigIdx /= 8; 3405 SubVecVT = 3406 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 3407 SubVecVT.isScalableVector()); 3408 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 3409 VecVT.isScalableVector()); 3410 Vec = DAG.getBitcast(VecVT, Vec); 3411 } else { 3412 // We can't slide this mask vector down, indexed by its i1 elements. 3413 // This poses a problem when we wish to extract a scalable vector which 3414 // can't be re-expressed as a larger type. Just choose the slow path and 3415 // extend to a larger type, then truncate back down. 3416 // TODO: We could probably improve this when extracting certain fixed 3417 // from fixed, where we can extract as i8 and shift the correct element 3418 // right to reach the desired subvector? 
3419 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3420 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3421 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3422 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3423 Op.getOperand(1));
3424 SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3425 return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3426 }
3427 }
3428
3429 // If the subvector is a fixed-length type, we cannot use subregister
3430 // manipulation to simplify the codegen; we don't know which register of an
3431 // LMUL group contains the specific subvector as we only know the minimum
3432 // register size. Therefore we must slide the vector group down the full
3433 // amount.
3434 if (SubVecVT.isFixedLengthVector()) {
3435 // With an index of 0 this is a cast-like subvector, which can be performed
3436 // with subregister operations.
3437 if (OrigIdx == 0)
3438 return Op;
3439 MVT ContainerVT = VecVT;
3440 if (VecVT.isFixedLengthVector()) {
3441 ContainerVT = getContainerForFixedLengthVector(VecVT);
3442 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3443 }
3444 SDValue Mask =
3445 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3446 // Set the vector length to only the number of elements we care about. This
3447 // avoids sliding down elements we're going to discard straight away.
3448 SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3449 SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3450 SDValue Slidedown =
3451 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3452 DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3453 // Now we can use a cast-like subvector extract to get the result.
3454 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3455 DAG.getConstant(0, DL, XLenVT));
3456 return DAG.getBitcast(Op.getValueType(), Slidedown);
3457 }
3458
3459 unsigned SubRegIdx, RemIdx;
3460 std::tie(SubRegIdx, RemIdx) =
3461 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3462 VecVT, SubVecVT, OrigIdx, TRI);
3463
3464 // If the Idx has been completely eliminated then this is a subvector extract
3465 // which naturally aligns to a vector register. These can easily be handled
3466 // using subregister manipulation.
3467 if (RemIdx == 0)
3468 return Op;
3469
3470 // Else we must shift our vector register directly to extract the subvector.
3471 // Do this using VSLIDEDOWN.
3472
3473 // If the vector type is an LMUL-group type, extract a subvector equal to the
3474 // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
3475 // instruction.
3476 MVT InterSubVT = VecVT;
3477 if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3478 InterSubVT = getLMUL1VT(VecVT);
3479 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3480 DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3481 }
3482
3483 // Slide this vector register down by the desired number of elements in order
3484 // to place the desired subvector starting at element 0.
3485 SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3486 // For scalable vectors this must be further multiplied by vscale.
3487 SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3488
3489 SDValue Mask, VL;
3490 std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3491 SDValue Slidedown =
3492 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3493 DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3494
3495 // Now the vector is in the right position; extract our final subvector. This
3496 // should resolve to a COPY.
3497 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3498 DAG.getConstant(0, DL, XLenVT));
3499
3500 // We might have bitcast from a mask type: cast back to the original type if
3501 // required.
3502 return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3503 }
3504
3505 // Lower step_vector to the vid instruction.
3506 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3507 SelectionDAG &DAG) const {
3508 SDLoc DL(Op);
3509 assert(Op.getConstantOperandAPInt(0) == 1 && "Unexpected step value");
3510 MVT VT = Op.getSimpleValueType();
3511 SDValue Mask, VL;
3512 std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3513 return DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3514 }
3515
3516 // Implement vector_reverse using vrgather.vv with indices determined by
3517 // subtracting the id of each element from (VLMAX-1). This will convert
3518 // the indices like so:
3519 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3520 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3521 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3522 SelectionDAG &DAG) const {
3523 SDLoc DL(Op);
3524 MVT VecVT = Op.getSimpleValueType();
3525 unsigned EltSize = VecVT.getScalarSizeInBits();
3526 unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3527
3528 unsigned MaxVLMAX = 0;
3529 unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3530 if (VectorBitsMax != 0)
3531 MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3532
3533 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3534 MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3535
3536 // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3537 // to use vrgatherei16.vv.
3538 // TODO: It's also possible to use vrgatherei16.vv for other types to
3539 // decrease register width for the index calculation.
3540 if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
3541 // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
3542 // Reverse each half, then reassemble them in reverse order.
3543 // NOTE: It's also possible that after splitting, VLMAX no longer
3544 // requires vrgatherei16.vv.
3545 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3546 SDValue Lo, Hi;
3547 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3548 EVT LoVT, HiVT;
3549 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3550 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3551 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3552 // Reassemble the low and high pieces reversed.
3553 // FIXME: This is a CONCAT_VECTORS.
3554 SDValue Res =
3555 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3556 DAG.getIntPtrConstant(0, DL));
3557 return DAG.getNode(
3558 ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3559 DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3560 }
3561
3562 // Just promote the int type to i16 which will double the LMUL.
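// (With i16 indices the rest of the lowering below is unchanged; as a rough
// sketch the reverse still amounts to
//   vid.v           vIdx
//   vrsub.vx        vIdx, vIdx, <reg holding VLMAX-1>
//   vrgatherei16.vv vDst, vSrc, vIdx
// with register names purely illustrative.)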
3563 IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount()); 3564 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL; 3565 } 3566 3567 MVT XLenVT = Subtarget.getXLenVT(); 3568 SDValue Mask, VL; 3569 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 3570 3571 // Calculate VLMAX-1 for the desired SEW. 3572 unsigned MinElts = VecVT.getVectorMinNumElements(); 3573 SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT, 3574 DAG.getConstant(MinElts, DL, XLenVT)); 3575 SDValue VLMinus1 = 3576 DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT)); 3577 3578 // Splat VLMAX-1 taking care to handle SEW==64 on RV32. 3579 bool IsRV32E64 = 3580 !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64; 3581 SDValue SplatVL; 3582 if (!IsRV32E64) 3583 SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1); 3584 else 3585 SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1); 3586 3587 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL); 3588 SDValue Indices = 3589 DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL); 3590 3591 return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL); 3592 } 3593 3594 SDValue 3595 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, 3596 SelectionDAG &DAG) const { 3597 auto *Load = cast<LoadSDNode>(Op); 3598 3599 SDLoc DL(Op); 3600 MVT VT = Op.getSimpleValueType(); 3601 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3602 3603 SDValue VL = 3604 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3605 3606 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 3607 SDValue NewLoad = DAG.getMemIntrinsicNode( 3608 RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, 3609 Load->getMemoryVT(), Load->getMemOperand()); 3610 3611 SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); 3612 return DAG.getMergeValues({Result, Load->getChain()}, DL); 3613 } 3614 3615 SDValue 3616 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, 3617 SelectionDAG &DAG) const { 3618 auto *Store = cast<StoreSDNode>(Op); 3619 3620 SDLoc DL(Op); 3621 MVT VT = Store->getValue().getSimpleValueType(); 3622 3623 // FIXME: We probably need to zero any extra bits in a byte for mask stores. 3624 // This is tricky to do. 
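// For non-mask types the lowering itself is straightforward: e.g. a store of
// a v4i32 value is expected to come out as a vse32.v with the AVL fixed to
// the number of fixed-vector elements (a sketch; the exact vsetvli/LMUL
// depends on the chosen container).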
3625 3626 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3627 3628 SDValue VL = 3629 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3630 3631 SDValue NewValue = 3632 convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget); 3633 return DAG.getMemIntrinsicNode( 3634 RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), 3635 {Store->getChain(), NewValue, Store->getBasePtr(), VL}, 3636 Store->getMemoryVT(), Store->getMemOperand()); 3637 } 3638 3639 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const { 3640 auto *Load = cast<MaskedLoadSDNode>(Op); 3641 3642 SDLoc DL(Op); 3643 MVT VT = Op.getSimpleValueType(); 3644 MVT XLenVT = Subtarget.getXLenVT(); 3645 3646 SDValue Mask = Load->getMask(); 3647 SDValue PassThru = Load->getPassThru(); 3648 SDValue VL; 3649 3650 MVT ContainerVT = VT; 3651 if (VT.isFixedLengthVector()) { 3652 ContainerVT = getContainerForFixedLengthVector(VT); 3653 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3654 3655 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 3656 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 3657 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 3658 } else 3659 VL = DAG.getRegister(RISCV::X0, XLenVT); 3660 3661 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 3662 SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT); 3663 SDValue Ops[] = {Load->getChain(), IntID, PassThru, 3664 Load->getBasePtr(), Mask, VL}; 3665 SDValue Result = 3666 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 3667 Load->getMemoryVT(), Load->getMemOperand()); 3668 SDValue Chain = Result.getValue(1); 3669 3670 if (VT.isFixedLengthVector()) 3671 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 3672 3673 return DAG.getMergeValues({Result, Chain}, DL); 3674 } 3675 3676 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const { 3677 auto *Store = cast<MaskedStoreSDNode>(Op); 3678 3679 SDLoc DL(Op); 3680 SDValue Val = Store->getValue(); 3681 SDValue Mask = Store->getMask(); 3682 MVT VT = Val.getSimpleValueType(); 3683 MVT XLenVT = Subtarget.getXLenVT(); 3684 SDValue VL; 3685 3686 MVT ContainerVT = VT; 3687 if (VT.isFixedLengthVector()) { 3688 ContainerVT = getContainerForFixedLengthVector(VT); 3689 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3690 3691 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 3692 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 3693 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 3694 } else 3695 VL = DAG.getRegister(RISCV::X0, XLenVT); 3696 3697 SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT); 3698 return DAG.getMemIntrinsicNode( 3699 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), 3700 {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL}, 3701 Store->getMemoryVT(), Store->getMemOperand()); 3702 } 3703 3704 SDValue 3705 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, 3706 SelectionDAG &DAG) const { 3707 MVT InVT = Op.getOperand(0).getSimpleValueType(); 3708 MVT ContainerVT = getContainerForFixedLengthVector(InVT); 3709 3710 MVT VT = Op.getSimpleValueType(); 3711 3712 SDValue Op1 = 3713 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 3714 SDValue Op2 = 3715 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 3716 3717 SDLoc DL(Op); 3718 SDValue VL = 3719 
DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 3720 3721 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3722 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3723 3724 SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, 3725 Op.getOperand(2), Mask, VL); 3726 3727 return convertFromScalableVector(VT, Cmp, DAG, Subtarget); 3728 } 3729 3730 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( 3731 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { 3732 MVT VT = Op.getSimpleValueType(); 3733 3734 if (VT.getVectorElementType() == MVT::i1) 3735 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); 3736 3737 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); 3738 } 3739 3740 // Lower vector ABS to smax(X, sub(0, X)). 3741 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const { 3742 SDLoc DL(Op); 3743 MVT VT = Op.getSimpleValueType(); 3744 SDValue X = Op.getOperand(0); 3745 3746 assert(VT.isFixedLengthVector() && "Unexpected type"); 3747 3748 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3749 X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); 3750 3751 SDValue Mask, VL; 3752 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3753 3754 SDValue SplatZero = 3755 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 3756 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 3757 SDValue NegX = 3758 DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL); 3759 SDValue Max = 3760 DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL); 3761 3762 return convertFromScalableVector(VT, Max, DAG, Subtarget); 3763 } 3764 3765 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV( 3766 SDValue Op, SelectionDAG &DAG) const { 3767 SDLoc DL(Op); 3768 MVT VT = Op.getSimpleValueType(); 3769 SDValue Mag = Op.getOperand(0); 3770 SDValue Sign = Op.getOperand(1); 3771 assert(Mag.getValueType() == Sign.getValueType() && 3772 "Can only handle COPYSIGN with matching types."); 3773 3774 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3775 Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget); 3776 Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget); 3777 3778 SDValue Mask, VL; 3779 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3780 3781 SDValue CopySign = 3782 DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL); 3783 3784 return convertFromScalableVector(VT, CopySign, DAG, Subtarget); 3785 } 3786 3787 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( 3788 SDValue Op, SelectionDAG &DAG) const { 3789 MVT VT = Op.getSimpleValueType(); 3790 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3791 3792 MVT I1ContainerVT = 3793 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3794 3795 SDValue CC = 3796 convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); 3797 SDValue Op1 = 3798 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 3799 SDValue Op2 = 3800 convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); 3801 3802 SDLoc DL(Op); 3803 SDValue Mask, VL; 3804 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3805 3806 SDValue Select = 3807 DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); 3808 3809 return convertFromScalableVector(VT, Select, DAG, Subtarget); 3810 } 3811 3812 SDValue 
RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, 3813 unsigned NewOpc, 3814 bool HasMask) const { 3815 MVT VT = Op.getSimpleValueType(); 3816 assert(useRVVForFixedLengthVectorVT(VT) && 3817 "Only expected to lower fixed length vector operation!"); 3818 MVT ContainerVT = getContainerForFixedLengthVector(VT); 3819 3820 // Create list of operands by converting existing ones to scalable types. 3821 SmallVector<SDValue, 6> Ops; 3822 for (const SDValue &V : Op->op_values()) { 3823 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 3824 3825 // Pass through non-vector operands. 3826 if (!V.getValueType().isVector()) { 3827 Ops.push_back(V); 3828 continue; 3829 } 3830 3831 // "cast" fixed length vector to a scalable vector. 3832 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && 3833 "Only fixed length vectors are supported!"); 3834 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 3835 } 3836 3837 SDLoc DL(Op); 3838 SDValue Mask, VL; 3839 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3840 if (HasMask) 3841 Ops.push_back(Mask); 3842 Ops.push_back(VL); 3843 3844 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); 3845 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); 3846 } 3847 3848 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to 3849 // a RVV indexed load. The RVV indexed load instructions only support the 3850 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or 3851 // truncated to XLEN and are treated as byte offsets. Any signed or scaled 3852 // indexing is extended to the XLEN value type and scaled accordingly. 3853 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const { 3854 auto *MGN = cast<MaskedGatherSDNode>(Op.getNode()); 3855 SDLoc DL(Op); 3856 3857 SDValue Index = MGN->getIndex(); 3858 SDValue Mask = MGN->getMask(); 3859 SDValue PassThru = MGN->getPassThru(); 3860 3861 MVT VT = Op.getSimpleValueType(); 3862 MVT IndexVT = Index.getSimpleValueType(); 3863 MVT XLenVT = Subtarget.getXLenVT(); 3864 3865 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 3866 "Unexpected VTs!"); 3867 assert(MGN->getBasePtr().getSimpleValueType() == XLenVT && 3868 "Unexpected pointer type"); 3869 // Targets have to explicitly opt-in for extending vector loads. 3870 assert(MGN->getExtensionType() == ISD::NON_EXTLOAD && 3871 "Unexpected extending MGATHER"); 3872 3873 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 3874 // the selection of the masked intrinsics doesn't do this for us. 3875 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 3876 3877 SDValue VL; 3878 MVT ContainerVT = VT; 3879 if (VT.isFixedLengthVector()) { 3880 // We need to use the larger of the result and index type to determine the 3881 // scalable type to use so we don't increase LMUL for any operand/result. 
3882 if (VT.bitsGE(IndexVT)) { 3883 ContainerVT = getContainerForFixedLengthVector(VT); 3884 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), 3885 ContainerVT.getVectorElementCount()); 3886 } else { 3887 IndexVT = getContainerForFixedLengthVector(IndexVT); 3888 ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(), 3889 IndexVT.getVectorElementCount()); 3890 } 3891 3892 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); 3893 3894 if (!IsUnmasked) { 3895 MVT MaskVT = 3896 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3897 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 3898 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 3899 } 3900 3901 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 3902 } else 3903 VL = DAG.getRegister(RISCV::X0, XLenVT); 3904 3905 unsigned IntID = 3906 IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask; 3907 SmallVector<SDValue, 8> Ops{MGN->getChain(), 3908 DAG.getTargetConstant(IntID, DL, XLenVT)}; 3909 if (!IsUnmasked) 3910 Ops.push_back(PassThru); 3911 Ops.push_back(MGN->getBasePtr()); 3912 Ops.push_back(Index); 3913 if (!IsUnmasked) 3914 Ops.push_back(Mask); 3915 Ops.push_back(VL); 3916 3917 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 3918 SDValue Result = 3919 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 3920 MGN->getMemoryVT(), MGN->getMemOperand()); 3921 SDValue Chain = Result.getValue(1); 3922 3923 if (VT.isFixedLengthVector()) 3924 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 3925 3926 return DAG.getMergeValues({Result, Chain}, DL); 3927 } 3928 3929 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to 3930 // a RVV indexed store. The RVV indexed store instructions only support the 3931 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or 3932 // truncated to XLEN and are treated as byte offsets. Any signed or scaled 3933 // indexing is extended to the XLEN value type and scaled accordingly. 3934 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op, 3935 SelectionDAG &DAG) const { 3936 auto *MSN = cast<MaskedScatterSDNode>(Op.getNode()); 3937 SDLoc DL(Op); 3938 SDValue Index = MSN->getIndex(); 3939 SDValue Mask = MSN->getMask(); 3940 SDValue Val = MSN->getValue(); 3941 3942 MVT VT = Val.getSimpleValueType(); 3943 MVT IndexVT = Index.getSimpleValueType(); 3944 MVT XLenVT = Subtarget.getXLenVT(); 3945 3946 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 3947 "Unexpected VTs!"); 3948 assert(MSN->getBasePtr().getSimpleValueType() == XLenVT && 3949 "Unexpected pointer type"); 3950 // Targets have to explicitly opt-in for extending vector loads and 3951 // truncating vector stores. 3952 assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER"); 3953 3954 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 3955 // the selection of the masked intrinsics doesn't do this for us. 3956 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 3957 3958 SDValue VL; 3959 if (VT.isFixedLengthVector()) { 3960 // We need to use the larger of the value and index type to determine the 3961 // scalable type to use so we don't increase LMUL for any operand/result. 
3962 if (VT.bitsGE(IndexVT)) {
3963 VT = getContainerForFixedLengthVector(VT);
3964 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
3965 VT.getVectorElementCount());
3966 } else {
3967 IndexVT = getContainerForFixedLengthVector(IndexVT);
3968 VT = MVT::getVectorVT(VT.getVectorElementType(),
3969 IndexVT.getVectorElementCount());
3970 }
3971
3972 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
3973 Val = convertToScalableVector(VT, Val, DAG, Subtarget);
3974
3975 if (!IsUnmasked) {
3976 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3977 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3978 }
3979
3980 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3981 } else
3982 VL = DAG.getRegister(RISCV::X0, XLenVT);
3983
3984 unsigned IntID =
3985 IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
3986 SmallVector<SDValue, 8> Ops{MSN->getChain(),
3987 DAG.getTargetConstant(IntID, DL, XLenVT)};
3988 Ops.push_back(Val);
3989 Ops.push_back(MSN->getBasePtr());
3990 Ops.push_back(Index);
3991 if (!IsUnmasked)
3992 Ops.push_back(Mask);
3993 Ops.push_back(VL);
3994
3995 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
3996 MSN->getMemoryVT(), MSN->getMemOperand());
3997 }
3998
3999 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4000 // form of the given Opcode.
4001 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4002 switch (Opcode) {
4003 default:
4004 llvm_unreachable("Unexpected opcode");
4005 case ISD::SHL:
4006 return RISCVISD::SLLW;
4007 case ISD::SRA:
4008 return RISCVISD::SRAW;
4009 case ISD::SRL:
4010 return RISCVISD::SRLW;
4011 case ISD::SDIV:
4012 return RISCVISD::DIVW;
4013 case ISD::UDIV:
4014 return RISCVISD::DIVUW;
4015 case ISD::UREM:
4016 return RISCVISD::REMUW;
4017 case ISD::ROTL:
4018 return RISCVISD::ROLW;
4019 case ISD::ROTR:
4020 return RISCVISD::RORW;
4021 case RISCVISD::GREVI:
4022 return RISCVISD::GREVIW;
4023 case RISCVISD::GORCI:
4024 return RISCVISD::GORCIW;
4025 }
4026 }
4027
4028 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4029 // Because i32 isn't a legal type for RV64, these operations would otherwise
4030 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
4031 // instructions later on because the fact that the operation was originally of
4032 // type i32 is lost.
4033 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4034 unsigned ExtOpc = ISD::ANY_EXTEND) {
4035 SDLoc DL(N);
4036 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4037 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4038 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4039 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4040 // ReplaceNodeResults requires we maintain the same type for the return value.
4041 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4042 }
4043
4044 // Converts the given 32-bit operation to an i64 operation with sign-extension
4045 // semantics to reduce the number of sign-extension instructions.
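// For example, an illegal i32 add on RV64 is rebuilt below as
//   (trunc (sext_inreg (add (any_ext LHS), (any_ext RHS)), i32))
// which matches ADDW during instruction selection and lets later users rely
// on the result already being sign-extended to 64 bits.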
4046 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { 4047 SDLoc DL(N); 4048 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4049 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 4050 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); 4051 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 4052 DAG.getValueType(MVT::i32)); 4053 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 4054 } 4055 4056 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 4057 SmallVectorImpl<SDValue> &Results, 4058 SelectionDAG &DAG) const { 4059 SDLoc DL(N); 4060 switch (N->getOpcode()) { 4061 default: 4062 llvm_unreachable("Don't know how to custom type legalize this operation!"); 4063 case ISD::STRICT_FP_TO_SINT: 4064 case ISD::STRICT_FP_TO_UINT: 4065 case ISD::FP_TO_SINT: 4066 case ISD::FP_TO_UINT: { 4067 bool IsStrict = N->isStrictFPOpcode(); 4068 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4069 "Unexpected custom legalisation"); 4070 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); 4071 // If the FP type needs to be softened, emit a library call using the 'si' 4072 // version. If we left it to default legalization we'd end up with 'di'. If 4073 // the FP type doesn't need to be softened just let generic type 4074 // legalization promote the result type. 4075 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != 4076 TargetLowering::TypeSoftenFloat) 4077 return; 4078 RTLIB::Libcall LC; 4079 if (N->getOpcode() == ISD::FP_TO_SINT || 4080 N->getOpcode() == ISD::STRICT_FP_TO_SINT) 4081 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); 4082 else 4083 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); 4084 MakeLibCallOptions CallOptions; 4085 EVT OpVT = Op0.getValueType(); 4086 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); 4087 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); 4088 SDValue Result; 4089 std::tie(Result, Chain) = 4090 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); 4091 Results.push_back(Result); 4092 if (IsStrict) 4093 Results.push_back(Chain); 4094 break; 4095 } 4096 case ISD::READCYCLECOUNTER: { 4097 assert(!Subtarget.is64Bit() && 4098 "READCYCLECOUNTER only has custom type legalization on riscv32"); 4099 4100 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4101 SDValue RCW = 4102 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); 4103 4104 Results.push_back( 4105 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); 4106 Results.push_back(RCW.getValue(2)); 4107 break; 4108 } 4109 case ISD::MUL: { 4110 unsigned Size = N->getSimpleValueType(0).getSizeInBits(); 4111 unsigned XLen = Subtarget.getXLen(); 4112 // This multiply needs to be expanded, try to use MULHSU+MUL if possible. 4113 if (Size > XLen) { 4114 assert(Size == (XLen * 2) && "Unexpected custom legalisation"); 4115 SDValue LHS = N->getOperand(0); 4116 SDValue RHS = N->getOperand(1); 4117 APInt HighMask = APInt::getHighBitsSet(Size, XLen); 4118 4119 bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask); 4120 bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask); 4121 // We need exactly one side to be unsigned. 
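// For example, an i64 multiply on RV32 where one operand is known to be
// zero-extended from 32 bits and the other sign-extended from 32 bits can be
// emitted as just (a sketch, registers illustrative):
//   mul    a0, s_lo, u_lo   ; low 32 bits of the product
//   mulhsu a1, s_lo, u_lo   ; high 32 bits, signed x unsigned
// rather than the generic multi-instruction expansion.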
4122 if (LHSIsU == RHSIsU) 4123 return; 4124 4125 auto MakeMULPair = [&](SDValue S, SDValue U) { 4126 MVT XLenVT = Subtarget.getXLenVT(); 4127 S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S); 4128 U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U); 4129 SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U); 4130 SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U); 4131 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi); 4132 }; 4133 4134 bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen; 4135 bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen; 4136 4137 // The other operand should be signed, but still prefer MULH when 4138 // possible. 4139 if (RHSIsU && LHSIsS && !RHSIsS) 4140 Results.push_back(MakeMULPair(LHS, RHS)); 4141 else if (LHSIsU && RHSIsS && !LHSIsS) 4142 Results.push_back(MakeMULPair(RHS, LHS)); 4143 4144 return; 4145 } 4146 LLVM_FALLTHROUGH; 4147 } 4148 case ISD::ADD: 4149 case ISD::SUB: 4150 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4151 "Unexpected custom legalisation"); 4152 if (N->getOperand(1).getOpcode() == ISD::Constant) 4153 return; 4154 Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); 4155 break; 4156 case ISD::SHL: 4157 case ISD::SRA: 4158 case ISD::SRL: 4159 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4160 "Unexpected custom legalisation"); 4161 if (N->getOperand(1).getOpcode() == ISD::Constant) 4162 return; 4163 Results.push_back(customLegalizeToWOp(N, DAG)); 4164 break; 4165 case ISD::ROTL: 4166 case ISD::ROTR: 4167 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4168 "Unexpected custom legalisation"); 4169 Results.push_back(customLegalizeToWOp(N, DAG)); 4170 break; 4171 case ISD::CTTZ: 4172 case ISD::CTTZ_ZERO_UNDEF: 4173 case ISD::CTLZ: 4174 case ISD::CTLZ_ZERO_UNDEF: { 4175 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4176 "Unexpected custom legalisation"); 4177 4178 SDValue NewOp0 = 4179 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4180 bool IsCTZ = 4181 N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF; 4182 unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW; 4183 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0); 4184 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 4185 return; 4186 } 4187 case ISD::SDIV: 4188 case ISD::UDIV: 4189 case ISD::UREM: { 4190 MVT VT = N->getSimpleValueType(0); 4191 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 4192 Subtarget.is64Bit() && Subtarget.hasStdExtM() && 4193 "Unexpected custom legalisation"); 4194 if (N->getOperand(0).getOpcode() == ISD::Constant || 4195 N->getOperand(1).getOpcode() == ISD::Constant) 4196 return; 4197 4198 // If the input is i32, use ANY_EXTEND since the W instructions don't read 4199 // the upper 32 bits. For other types we need to sign or zero extend 4200 // based on the opcode. 4201 unsigned ExtOpc = ISD::ANY_EXTEND; 4202 if (VT != MVT::i32) 4203 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND 4204 : ISD::ZERO_EXTEND; 4205 4206 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); 4207 break; 4208 } 4209 case ISD::UADDO: 4210 case ISD::USUBO: { 4211 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4212 "Unexpected custom legalisation"); 4213 bool IsAdd = N->getOpcode() == ISD::UADDO; 4214 // Create an ADDW or SUBW. 
4215 SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4216 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 4217 SDValue Res = 4218 DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS); 4219 Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res, 4220 DAG.getValueType(MVT::i32)); 4221 4222 // Sign extend the LHS and perform an unsigned compare with the ADDW result. 4223 // Since the inputs are sign extended from i32, this is equivalent to 4224 // comparing the lower 32 bits. 4225 LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 4226 SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS, 4227 IsAdd ? ISD::SETULT : ISD::SETUGT); 4228 4229 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 4230 Results.push_back(Overflow); 4231 return; 4232 } 4233 case ISD::UADDSAT: 4234 case ISD::USUBSAT: { 4235 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4236 "Unexpected custom legalisation"); 4237 if (Subtarget.hasStdExtZbb()) { 4238 // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using 4239 // sign extend allows overflow of the lower 32 bits to be detected on 4240 // the promoted size. 4241 SDValue LHS = 4242 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 4243 SDValue RHS = 4244 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1)); 4245 SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS); 4246 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 4247 return; 4248 } 4249 4250 // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom 4251 // promotion for UADDO/USUBO. 4252 Results.push_back(expandAddSubSat(N, DAG)); 4253 return; 4254 } 4255 case ISD::BITCAST: { 4256 EVT VT = N->getValueType(0); 4257 assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!"); 4258 SDValue Op0 = N->getOperand(0); 4259 EVT Op0VT = Op0.getValueType(); 4260 MVT XLenVT = Subtarget.getXLenVT(); 4261 if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) { 4262 SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0); 4263 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); 4264 } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() && 4265 Subtarget.hasStdExtF()) { 4266 SDValue FPConv = 4267 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); 4268 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); 4269 } else if (!VT.isVector() && Op0VT.isFixedLengthVector() && 4270 isTypeLegal(Op0VT)) { 4271 // Custom-legalize bitcasts from fixed-length vector types to illegal 4272 // scalar types in order to improve codegen. Bitcast the vector to a 4273 // one-element vector type whose element type is the same as the result 4274 // type, and extract the first element. 4275 LLVMContext &Context = *DAG.getContext(); 4276 SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0); 4277 Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, 4278 DAG.getConstant(0, DL, XLenVT))); 4279 } 4280 break; 4281 } 4282 case RISCVISD::GREVI: 4283 case RISCVISD::GORCI: { 4284 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4285 "Unexpected custom legalisation"); 4286 // This is similar to customLegalizeToWOp, except that we pass the second 4287 // operand (a TargetConstant) straight through: it is already of type 4288 // XLenVT. 
4289 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 4290 SDValue NewOp0 = 4291 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4292 SDValue NewRes = 4293 DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1)); 4294 // ReplaceNodeResults requires we maintain the same type for the return 4295 // value. 4296 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 4297 break; 4298 } 4299 case RISCVISD::SHFLI: { 4300 // There is no SHFLIW instruction, but we can just promote the operation. 4301 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4302 "Unexpected custom legalisation"); 4303 SDValue NewOp0 = 4304 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4305 SDValue NewRes = 4306 DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1)); 4307 // ReplaceNodeResults requires we maintain the same type for the return 4308 // value. 4309 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 4310 break; 4311 } 4312 case ISD::BSWAP: 4313 case ISD::BITREVERSE: { 4314 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4315 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 4316 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 4317 N->getOperand(0)); 4318 unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24; 4319 SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0, 4320 DAG.getTargetConstant(Imm, DL, 4321 Subtarget.getXLenVT())); 4322 // ReplaceNodeResults requires we maintain the same type for the return 4323 // value. 4324 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW)); 4325 break; 4326 } 4327 case ISD::FSHL: 4328 case ISD::FSHR: { 4329 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4330 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 4331 SDValue NewOp0 = 4332 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4333 SDValue NewOp1 = 4334 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 4335 SDValue NewOp2 = 4336 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 4337 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 4338 // Mask the shift amount to 5 bits. 4339 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 4340 DAG.getConstant(0x1f, DL, MVT::i64)); 4341 unsigned Opc = 4342 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; 4343 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); 4344 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 4345 break; 4346 } 4347 case ISD::EXTRACT_VECTOR_ELT: { 4348 // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element 4349 // type is illegal (currently only vXi64 RV32). 4350 // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are 4351 // transferred to the destination register. We issue two of these from the 4352 // upper- and lower- halves of the SEW-bit vector element, slid down to the 4353 // first element. 4354 SDValue Vec = N->getOperand(0); 4355 SDValue Idx = N->getOperand(1); 4356 4357 // The vector type hasn't been legalized yet so we can't issue target 4358 // specific nodes if it needs legalization. 4359 // FIXME: We would manually legalize if it's important. 
4360 if (!isTypeLegal(Vec.getValueType())) 4361 return; 4362 4363 MVT VecVT = Vec.getSimpleValueType(); 4364 4365 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && 4366 VecVT.getVectorElementType() == MVT::i64 && 4367 "Unexpected EXTRACT_VECTOR_ELT legalization"); 4368 4369 // If this is a fixed vector, we need to convert it to a scalable vector. 4370 MVT ContainerVT = VecVT; 4371 if (VecVT.isFixedLengthVector()) { 4372 ContainerVT = getContainerForFixedLengthVector(VecVT); 4373 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 4374 } 4375 4376 MVT XLenVT = Subtarget.getXLenVT(); 4377 4378 // Use a VL of 1 to avoid processing more elements than we need. 4379 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 4380 SDValue VL = DAG.getConstant(1, DL, XLenVT); 4381 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 4382 4383 // Unless the index is known to be 0, we must slide the vector down to get 4384 // the desired element into index 0. 4385 if (!isNullConstant(Idx)) { 4386 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 4387 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 4388 } 4389 4390 // Extract the lower XLEN bits of the correct vector element. 4391 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 4392 4393 // To extract the upper XLEN bits of the vector element, shift the first 4394 // element right by 32 bits and re-extract the lower XLEN bits. 4395 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 4396 DAG.getConstant(32, DL, XLenVT), VL); 4397 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, 4398 ThirtyTwoV, Mask, VL); 4399 4400 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 4401 4402 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 4403 break; 4404 } 4405 case ISD::INTRINSIC_WO_CHAIN: { 4406 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 4407 switch (IntNo) { 4408 default: 4409 llvm_unreachable( 4410 "Don't know how to custom type legalize this intrinsic!"); 4411 case Intrinsic::riscv_orc_b: { 4412 // Lower to the GORCI encoding for orc.b with the operand extended. 4413 SDValue NewOp = 4414 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 4415 // If Zbp is enabled, use GORCIW which will sign extend the result. 4416 unsigned Opc = 4417 Subtarget.hasStdExtZbp() ? RISCVISD::GORCIW : RISCVISD::GORCI; 4418 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp, 4419 DAG.getTargetConstant(7, DL, MVT::i64)); 4420 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 4421 return; 4422 } 4423 case Intrinsic::riscv_vmv_x_s: { 4424 EVT VT = N->getValueType(0); 4425 MVT XLenVT = Subtarget.getXLenVT(); 4426 if (VT.bitsLT(XLenVT)) { 4427 // Simple case just extract using vmv.x.s and truncate. 4428 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, 4429 Subtarget.getXLenVT(), N->getOperand(1)); 4430 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); 4431 return; 4432 } 4433 4434 assert(VT == MVT::i64 && !Subtarget.is64Bit() && 4435 "Unexpected custom legalization"); 4436 4437 // We need to do the move in two steps. 4438 SDValue Vec = N->getOperand(1); 4439 MVT VecVT = Vec.getSimpleValueType(); 4440 4441 // First extract the lower XLEN bits of the element. 
4442 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4443
4444 // To extract the upper XLEN bits of the vector element, shift the first
4445 // element right by 32 bits and re-extract the lower XLEN bits.
4446 SDValue VL = DAG.getConstant(1, DL, XLenVT);
4447 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4448 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4449 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
4450 DAG.getConstant(32, DL, XLenVT), VL);
4451 SDValue LShr32 =
4452 DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
4453 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4454
4455 Results.push_back(
4456 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4457 break;
4458 }
4459 }
4460 break;
4461 }
4462 case ISD::VECREDUCE_ADD:
4463 case ISD::VECREDUCE_AND:
4464 case ISD::VECREDUCE_OR:
4465 case ISD::VECREDUCE_XOR:
4466 case ISD::VECREDUCE_SMAX:
4467 case ISD::VECREDUCE_UMAX:
4468 case ISD::VECREDUCE_SMIN:
4469 case ISD::VECREDUCE_UMIN:
4470 if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
4471 Results.push_back(V);
4472 break;
4473 }
4474 }
4475
4476 // A structure to hold one of the bit-manipulation patterns below. Together, a
4477 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
4478 // (or (and (shl x, 1), 0xAAAAAAAA),
4479 // (and (srl x, 1), 0x55555555))
4480 struct RISCVBitmanipPat {
4481 SDValue Op;
4482 unsigned ShAmt;
4483 bool IsSHL;
4484
4485 bool formsPairWith(const RISCVBitmanipPat &Other) const {
4486 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
4487 }
4488 };
4489
4490 // Matches patterns of the form
4491 // (and (shl x, C2), (C1 << C2))
4492 // (and (srl x, C2), C1)
4493 // (shl (and x, C1), C2)
4494 // (srl (and x, (C1 << C2)), C2)
4495 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
4496 // The expected masks for each shift amount are specified in BitmanipMasks where
4497 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
4498 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
4499 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
4500 // XLen is 64.
4501 static Optional<RISCVBitmanipPat>
4502 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
4503 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
4504 "Unexpected number of masks");
4505 Optional<uint64_t> Mask;
4506 // Optionally consume a mask around the shift operation.
4507 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
4508 Mask = Op.getConstantOperandVal(1);
4509 Op = Op.getOperand(0);
4510 }
4511 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
4512 return None;
4513 bool IsSHL = Op.getOpcode() == ISD::SHL;
4514
4515 if (!isa<ConstantSDNode>(Op.getOperand(1)))
4516 return None;
4517 uint64_t ShAmt = Op.getConstantOperandVal(1);
4518
4519 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
4520 if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
4521 return None;
4522 // If we don't have enough masks for 64 bit, then we must be trying to
4523 // match SHFL so we're only allowed to shift 1/4 of the width.
4524 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
4525 return None;
4526
4527 SDValue Src = Op.getOperand(0);
4528
4529 // The expected mask is shifted left when the AND is found around SHL
4530 // patterns.
4531 // ((x >> 1) & 0x55555555)
4532 // ((x << 1) & 0xAAAAAAAA)
4533 bool SHLExpMask = IsSHL;
4534
4535 if (!Mask) {
4536 // Sometimes LLVM keeps the mask as an operand of the shift, typically when
4537 // the mask is all ones: consume that now.
4538 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
4539 Mask = Src.getConstantOperandVal(1);
4540 Src = Src.getOperand(0);
4541 // The expected mask is now in fact shifted left for SRL, so reverse the
4542 // decision.
4543 // ((x & 0xAAAAAAAA) >> 1)
4544 // ((x & 0x55555555) << 1)
4545 SHLExpMask = !SHLExpMask;
4546 } else {
4547 // Use a default shifted mask of all-ones if there's no AND, truncated
4548 // down to the expected width. This simplifies the logic later on.
4549 Mask = maskTrailingOnes<uint64_t>(Width);
4550 *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
4551 }
4552 }
4553
4554 unsigned MaskIdx = Log2_32(ShAmt);
4555 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4556
4557 if (SHLExpMask)
4558 ExpMask <<= ShAmt;
4559
4560 if (Mask != ExpMask)
4561 return None;
4562
4563 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
4564 }
4565
4566 // Matches any of the following bit-manipulation patterns:
4567 // (and (shl x, 1), (0x55555555 << 1))
4568 // (and (srl x, 1), 0x55555555)
4569 // (shl (and x, 0x55555555), 1)
4570 // (srl (and x, (0x55555555 << 1)), 1)
4571 // where the shift amount and mask may vary thus:
4572 // [1] = 0x55555555 / 0xAAAAAAAA
4573 // [2] = 0x33333333 / 0xCCCCCCCC
4574 // [4] = 0x0F0F0F0F / 0xF0F0F0F0
4575 // [8] = 0x00FF00FF / 0xFF00FF00
4576 // [16] = 0x0000FFFF / 0xFFFF0000
4577 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
4578 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
4579 // These are the unshifted masks which we use to match bit-manipulation
4580 // patterns. They may be shifted left in certain circumstances.
4581 static const uint64_t BitmanipMasks[] = {
4582 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
4583 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
4584
4585 return matchRISCVBitmanipPat(Op, BitmanipMasks);
4586 }
4587
4588 // Match the following pattern as a GREVI(W) operation
4589 // (or (BITMANIP_SHL x), (BITMANIP_SRL x))
4590 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
4591 const RISCVSubtarget &Subtarget) {
4592 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4593 EVT VT = Op.getValueType();
4594
4595 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4596 auto LHS = matchGREVIPat(Op.getOperand(0));
4597 auto RHS = matchGREVIPat(Op.getOperand(1));
4598 if (LHS && RHS && LHS->formsPairWith(*RHS)) {
4599 SDLoc DL(Op);
4600 return DAG.getNode(
4601 RISCVISD::GREVI, DL, VT, LHS->Op,
4602 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
4603 }
4604 }
4605 return SDValue();
4606 }
4607
4608 // Matches any of the following patterns as a GORCI(W) operation
4609 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2
4610 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2
4611 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
4612 // Note that with the variant of 3.,
4613 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
4614 // the inner pattern will first be matched as GREVI and then the outer
4615 // pattern will be matched to GORC via the first rule above.
4616 // 4. (or (rotl/rotr x, bitwidth/2), x)
4617 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
4618 const RISCVSubtarget &Subtarget) {
4619 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4620 EVT VT = Op.getValueType();
4621
4622 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4623 SDLoc DL(Op);
4624 SDValue Op0 = Op.getOperand(0);
4625 SDValue Op1 = Op.getOperand(1);
4626
4627 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
4628 if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X &&
4629 isPowerOf2_32(Reverse.getConstantOperandVal(1)))
4630 return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1));
4631 // We can also form GORCI from ROTL/ROTR by half the bitwidth.
4632 if ((Reverse.getOpcode() == ISD::ROTL ||
4633 Reverse.getOpcode() == ISD::ROTR) &&
4634 Reverse.getOperand(0) == X &&
4635 isa<ConstantSDNode>(Reverse.getOperand(1))) {
4636 uint64_t RotAmt = Reverse.getConstantOperandVal(1);
4637 if (RotAmt == (VT.getSizeInBits() / 2))
4638 return DAG.getNode(
4639 RISCVISD::GORCI, DL, VT, X,
4640 DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT()));
4641 }
4642 return SDValue();
4643 };
4644
4645 // Check for either commutable permutation of (or (GREVI x, shamt), x)
4646 if (SDValue V = MatchOROfReverse(Op0, Op1))
4647 return V;
4648 if (SDValue V = MatchOROfReverse(Op1, Op0))
4649 return V;
4650
4651 // OR is commutable so canonicalize its OR operand to the left
4652 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
4653 std::swap(Op0, Op1);
4654 if (Op0.getOpcode() != ISD::OR)
4655 return SDValue();
4656 SDValue OrOp0 = Op0.getOperand(0);
4657 SDValue OrOp1 = Op0.getOperand(1);
4658 auto LHS = matchGREVIPat(OrOp0);
4659 // OR is commutable so swap the operands and try again: x might have been
4660 // on the left
4661 if (!LHS) {
4662 std::swap(OrOp0, OrOp1);
4663 LHS = matchGREVIPat(OrOp0);
4664 }
4665 auto RHS = matchGREVIPat(Op1);
4666 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
4667 return DAG.getNode(
4668 RISCVISD::GORCI, DL, VT, LHS->Op,
4669 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT()));
4670 }
4671 }
4672 return SDValue();
4673 }
4674
4675 // Matches any of the following bit-manipulation patterns:
4676 // (and (shl x, 1), (0x22222222 << 1))
4677 // (and (srl x, 1), 0x22222222)
4678 // (shl (and x, 0x22222222), 1)
4679 // (srl (and x, (0x22222222 << 1)), 1)
4680 // where the shift amount and mask may vary thus:
4681 // [1] = 0x22222222 / 0x44444444
4682 // [2] = 0x0C0C0C0C / 0x30303030
4683 // [4] = 0x00F000F0 / 0x0F000F00
4684 // [8] = 0x0000FF00 / 0x00FF0000
4685 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
4686 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
4687 // These are the unshifted masks which we use to match bit-manipulation
4688 // patterns. They may be shifted left in certain circumstances.
4689 static const uint64_t BitmanipMasks[] = {
4690 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
4691 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
4692
4693 return matchRISCVBitmanipPat(Op, BitmanipMasks);
4694 }
4695
4696 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
4697 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
4698 const RISCVSubtarget &Subtarget) {
4699 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4700 EVT VT = Op.getValueType();
4701
4702 if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
4703 return SDValue();
4704
4705 SDValue Op0 = Op.getOperand(0);
4706 SDValue Op1 = Op.getOperand(1);
4707
4708 // Or is commutable so canonicalize the second OR to the LHS.
4709 if (Op0.getOpcode() != ISD::OR)
4710 std::swap(Op0, Op1);
4711 if (Op0.getOpcode() != ISD::OR)
4712 return SDValue();
4713
4714 // We found an inner OR, so our operands are the operands of the inner OR
4715 // and the other operand of the outer OR.
4716 SDValue A = Op0.getOperand(0);
4717 SDValue B = Op0.getOperand(1);
4718 SDValue C = Op1;
4719
4720 auto Match1 = matchSHFLPat(A);
4721 auto Match2 = matchSHFLPat(B);
4722
4723 // If neither matched, we failed.
4724 if (!Match1 && !Match2)
4725 return SDValue();
4726
4727 // We had at least one match. If one failed, try the remaining C operand.
4728 if (!Match1) {
4729 std::swap(A, C);
4730 Match1 = matchSHFLPat(A);
4731 if (!Match1)
4732 return SDValue();
4733 } else if (!Match2) {
4734 std::swap(B, C);
4735 Match2 = matchSHFLPat(B);
4736 if (!Match2)
4737 return SDValue();
4738 }
4739 assert(Match1 && Match2);
4740
4741 // Make sure our matches pair up.
4742 if (!Match1->formsPairWith(*Match2))
4743 return SDValue();
4744
4745 // All that remains is to make sure C is an AND with the same input, that masks
4746 // out the bits that are being shuffled.
4747 if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
4748 C.getOperand(0) != Match1->Op)
4749 return SDValue();
4750
4751 uint64_t Mask = C.getConstantOperandVal(1);
4752
4753 static const uint64_t BitmanipMasks[] = {
4754 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
4755 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
4756 };
4757
4758 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
4759 unsigned MaskIdx = Log2_32(Match1->ShAmt);
4760 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4761
4762 if (Mask != ExpMask)
4763 return SDValue();
4764
4765 SDLoc DL(Op);
4766 return DAG.getNode(
4767 RISCVISD::SHFLI, DL, VT, Match1->Op,
4768 DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT()));
4769 }
4770
4771 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
4772 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
4773 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage
4774 // does not undo itself, but it is redundant.
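// For example, (GREVI (GREVI x, 1), 2) folds to (GREVI x, 3), (GREVI (GREVI x, 3), 3)
// folds back to x, and (GORCI (GORCI x, 1), 3) folds to (GORCI x, 3).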
4775 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) { 4776 unsigned ShAmt1 = N->getConstantOperandVal(1); 4777 SDValue Src = N->getOperand(0); 4778 4779 if (Src.getOpcode() != N->getOpcode()) 4780 return SDValue(); 4781 4782 unsigned ShAmt2 = Src.getConstantOperandVal(1); 4783 Src = Src.getOperand(0); 4784 4785 unsigned CombinedShAmt; 4786 if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW) 4787 CombinedShAmt = ShAmt1 | ShAmt2; 4788 else 4789 CombinedShAmt = ShAmt1 ^ ShAmt2; 4790 4791 if (CombinedShAmt == 0) 4792 return Src; 4793 4794 SDLoc DL(N); 4795 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src, 4796 DAG.getTargetConstant(CombinedShAmt, DL, 4797 N->getOperand(1).getValueType())); 4798 } 4799 4800 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 4801 DAGCombinerInfo &DCI) const { 4802 SelectionDAG &DAG = DCI.DAG; 4803 4804 switch (N->getOpcode()) { 4805 default: 4806 break; 4807 case RISCVISD::SplitF64: { 4808 SDValue Op0 = N->getOperand(0); 4809 // If the input to SplitF64 is just BuildPairF64 then the operation is 4810 // redundant. Instead, use BuildPairF64's operands directly. 4811 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 4812 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 4813 4814 SDLoc DL(N); 4815 4816 // It's cheaper to materialise two 32-bit integers than to load a double 4817 // from the constant pool and transfer it to integer registers through the 4818 // stack. 4819 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { 4820 APInt V = C->getValueAPF().bitcastToAPInt(); 4821 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); 4822 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); 4823 return DCI.CombineTo(N, Lo, Hi); 4824 } 4825 4826 // This is a target-specific version of a DAGCombine performed in 4827 // DAGCombiner::visitBITCAST. It performs the equivalent of: 4828 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 4829 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 4830 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 4831 !Op0.getNode()->hasOneUse()) 4832 break; 4833 SDValue NewSplitF64 = 4834 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), 4835 Op0.getOperand(0)); 4836 SDValue Lo = NewSplitF64.getValue(0); 4837 SDValue Hi = NewSplitF64.getValue(1); 4838 APInt SignBit = APInt::getSignMask(32); 4839 if (Op0.getOpcode() == ISD::FNEG) { 4840 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, 4841 DAG.getConstant(SignBit, DL, MVT::i32)); 4842 return DCI.CombineTo(N, Lo, NewHi); 4843 } 4844 assert(Op0.getOpcode() == ISD::FABS); 4845 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, 4846 DAG.getConstant(~SignBit, DL, MVT::i32)); 4847 return DCI.CombineTo(N, Lo, NewHi); 4848 } 4849 case RISCVISD::SLLW: 4850 case RISCVISD::SRAW: 4851 case RISCVISD::SRLW: 4852 case RISCVISD::ROLW: 4853 case RISCVISD::RORW: { 4854 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 
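// When SimplifyDemandedBits reports a change it has already rewritten the DAG, so
// returning SDValue(N, 0) below signals to the combiner that N was updated in place.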
4855 SDValue LHS = N->getOperand(0);
4856 SDValue RHS = N->getOperand(1);
4857 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
4858 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
4859 if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
4860 SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
4861 if (N->getOpcode() != ISD::DELETED_NODE)
4862 DCI.AddToWorklist(N);
4863 return SDValue(N, 0);
4864 }
4865 break;
4866 }
4867 case RISCVISD::CLZW:
4868 case RISCVISD::CTZW: {
4869 // Only the lower 32 bits of the first operand are read.
4870 SDValue Op0 = N->getOperand(0);
4871 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
4872 if (SimplifyDemandedBits(Op0, Mask, DCI)) {
4873 if (N->getOpcode() != ISD::DELETED_NODE)
4874 DCI.AddToWorklist(N);
4875 return SDValue(N, 0);
4876 }
4877 break;
4878 }
4879 case RISCVISD::FSL:
4880 case RISCVISD::FSR: {
4881 // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
4882 SDValue ShAmt = N->getOperand(2);
4883 unsigned BitWidth = ShAmt.getValueSizeInBits();
4884 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
4885 APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
4886 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
4887 if (N->getOpcode() != ISD::DELETED_NODE)
4888 DCI.AddToWorklist(N);
4889 return SDValue(N, 0);
4890 }
4891 break;
4892 }
4893 case RISCVISD::FSLW:
4894 case RISCVISD::FSRW: {
4895 // Only the lower 32 bits of the value operands and the lower 6 bits of the
4896 // shift amount are read.
4897 SDValue Op0 = N->getOperand(0);
4898 SDValue Op1 = N->getOperand(1);
4899 SDValue ShAmt = N->getOperand(2);
4900 APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
4901 APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
4902 if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
4903 SimplifyDemandedBits(Op1, OpMask, DCI) ||
4904 SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
4905 if (N->getOpcode() != ISD::DELETED_NODE)
4906 DCI.AddToWorklist(N);
4907 return SDValue(N, 0);
4908 }
4909 break;
4910 }
4911 case RISCVISD::GREVIW:
4912 case RISCVISD::GORCIW: {
4913 // Only the lower 32 bits of the first operand are read.
4914 SDValue Op0 = N->getOperand(0);
4915 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
4916 if (SimplifyDemandedBits(Op0, Mask, DCI)) {
4917 if (N->getOpcode() != ISD::DELETED_NODE)
4918 DCI.AddToWorklist(N);
4919 return SDValue(N, 0);
4920 }
4921
4922 return combineGREVI_GORCI(N, DCI.DAG);
4923 }
4924 case RISCVISD::FMV_X_ANYEXTW_RV64: {
4925 SDLoc DL(N);
4926 SDValue Op0 = N->getOperand(0);
4927 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
4928 // conversion is unnecessary and can be replaced with the (already
4929 // any-extended i64) FMV_W_X_RV64 operand.
4930 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
4931 assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
4932 "Unexpected value type!");
4933 return Op0.getOperand(0);
4934 }
4935
4936 // This is a target-specific version of a DAGCombine performed in
4937 // DAGCombiner::visitBITCAST.
It performs the equivalent of: 4938 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 4939 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 4940 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 4941 !Op0.getNode()->hasOneUse()) 4942 break; 4943 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 4944 Op0.getOperand(0)); 4945 APInt SignBit = APInt::getSignMask(32).sext(64); 4946 if (Op0.getOpcode() == ISD::FNEG) 4947 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 4948 DAG.getConstant(SignBit, DL, MVT::i64)); 4949 4950 assert(Op0.getOpcode() == ISD::FABS); 4951 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 4952 DAG.getConstant(~SignBit, DL, MVT::i64)); 4953 } 4954 case RISCVISD::GREVI: 4955 case RISCVISD::GORCI: 4956 return combineGREVI_GORCI(N, DCI.DAG); 4957 case ISD::OR: 4958 if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) 4959 return GREV; 4960 if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) 4961 return GORC; 4962 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget)) 4963 return SHFL; 4964 break; 4965 case RISCVISD::SELECT_CC: { 4966 // Transform 4967 SDValue LHS = N->getOperand(0); 4968 SDValue RHS = N->getOperand(1); 4969 auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); 4970 if (!ISD::isIntEqualitySetCC(CCVal)) 4971 break; 4972 4973 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> 4974 // (select_cc X, Y, lt, trueV, falseV) 4975 // Sometimes the setcc is introduced after select_cc has been formed. 4976 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 4977 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 4978 // If we're looking for eq 0 instead of ne 0, we need to invert the 4979 // condition. 4980 bool Invert = CCVal == ISD::SETEQ; 4981 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 4982 if (Invert) 4983 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 4984 4985 SDLoc DL(N); 4986 RHS = LHS.getOperand(1); 4987 LHS = LHS.getOperand(0); 4988 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 4989 4990 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 4991 return DAG.getNode( 4992 RISCVISD::SELECT_CC, DL, N->getValueType(0), 4993 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 4994 } 4995 4996 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> 4997 // (select_cc X, Y, eq/ne, trueV, falseV) 4998 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 4999 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), 5000 {LHS.getOperand(0), LHS.getOperand(1), 5001 N->getOperand(2), N->getOperand(3), 5002 N->getOperand(4)}); 5003 // (select_cc X, 1, setne, trueV, falseV) -> 5004 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 5005 // This can occur when legalizing some floating point comparisons. 
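// X is provably 0/1 when every bit above bit 0 is known to be zero.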
5006 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5007 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5008 SDLoc DL(N);
5009 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5010 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
5011 RHS = DAG.getConstant(0, DL, LHS.getValueType());
5012 return DAG.getNode(
5013 RISCVISD::SELECT_CC, DL, N->getValueType(0),
5014 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5015 }
5016
5017 break;
5018 }
5019 case RISCVISD::BR_CC: {
5020 SDValue LHS = N->getOperand(1);
5021 SDValue RHS = N->getOperand(2);
5022 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5023 if (!ISD::isIntEqualitySetCC(CCVal))
5024 break;
5025
5026 // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5027 // (br_cc X, Y, lt, dest)
5028 // Sometimes the setcc is introduced after br_cc has been formed.
5029 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5030 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5031 // If we're looking for eq 0 instead of ne 0, we need to invert the
5032 // condition.
5033 bool Invert = CCVal == ISD::SETEQ;
5034 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5035 if (Invert)
5036 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5037
5038 SDLoc DL(N);
5039 RHS = LHS.getOperand(1);
5040 LHS = LHS.getOperand(0);
5041 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5042
5043 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5044 N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5045 N->getOperand(4));
5046 }
5047
5048 // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
5049 // (br_cc X, Y, eq/ne, dest)
5050 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5051 return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5052 N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5053 N->getOperand(3), N->getOperand(4));
5054
5055 // (br_cc X, 1, setne, dest) ->
5056 // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
5057 // This can occur when legalizing some floating point comparisons.
5058 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5059 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5060 SDLoc DL(N);
5061 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5062 SDValue TargetCC = DAG.getCondCode(CCVal);
5063 RHS = DAG.getConstant(0, DL, LHS.getValueType());
5064 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5065 N->getOperand(0), LHS, RHS, TargetCC,
5066 N->getOperand(4));
5067 }
5068 break;
5069 }
5070 case ISD::FCOPYSIGN: {
5071 EVT VT = N->getValueType(0);
5072 if (!VT.isVector())
5073 break;
5074 // There is a form of VFSGNJ which injects the negated sign of its second
5075 // operand. Try and bubble any FNEG up after the extend/round to produce
5076 // this optimized pattern. Avoid modifying cases where the FP_ROUND has
5077 // TRUNC=1.
5078 SDValue In2 = N->getOperand(1);
5079 // Avoid cases where the extend/round has multiple uses, as duplicating
5080 // those is typically more expensive than removing a fneg.
5081 if (!In2.hasOneUse()) 5082 break; 5083 if (In2.getOpcode() != ISD::FP_EXTEND && 5084 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0)) 5085 break; 5086 In2 = In2.getOperand(0); 5087 if (In2.getOpcode() != ISD::FNEG) 5088 break; 5089 SDLoc DL(N); 5090 SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT); 5091 return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0), 5092 DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound)); 5093 } 5094 case ISD::MGATHER: 5095 case ISD::MSCATTER: { 5096 if (!DCI.isBeforeLegalize()) 5097 break; 5098 MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N); 5099 SDValue Index = MGSN->getIndex(); 5100 EVT IndexVT = Index.getValueType(); 5101 MVT XLenVT = Subtarget.getXLenVT(); 5102 // RISCV indexed loads only support the "unsigned unscaled" addressing 5103 // mode, so anything else must be manually legalized. 5104 bool NeedsIdxLegalization = MGSN->isIndexScaled() || 5105 (MGSN->isIndexSigned() && 5106 IndexVT.getVectorElementType().bitsLT(XLenVT)); 5107 if (!NeedsIdxLegalization) 5108 break; 5109 5110 SDLoc DL(N); 5111 5112 // Any index legalization should first promote to XLenVT, so we don't lose 5113 // bits when scaling. This may create an illegal index type so we let 5114 // LLVM's legalization take care of the splitting. 5115 if (IndexVT.getVectorElementType().bitsLT(XLenVT)) { 5116 IndexVT = IndexVT.changeVectorElementType(XLenVT); 5117 Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND 5118 : ISD::ZERO_EXTEND, 5119 DL, IndexVT, Index); 5120 } 5121 5122 unsigned Scale = N->getConstantOperandVal(5); 5123 if (MGSN->isIndexScaled() && Scale != 1) { 5124 // Manually scale the indices by the element size. 5125 // TODO: Sanitize the scale operand here? 5126 assert(isPowerOf2_32(Scale) && "Expecting power-of-two types"); 5127 SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT); 5128 Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale); 5129 } 5130 5131 ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED; 5132 if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) { 5133 return DAG.getMaskedGather( 5134 N->getVTList(), MGSN->getMemoryVT(), DL, 5135 {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(), 5136 MGSN->getBasePtr(), Index, MGN->getScale()}, 5137 MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType()); 5138 } 5139 const auto *MSN = cast<MaskedScatterSDNode>(N); 5140 return DAG.getMaskedScatter( 5141 N->getVTList(), MGSN->getMemoryVT(), DL, 5142 {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(), 5143 Index, MGSN->getScale()}, 5144 MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore()); 5145 } 5146 } 5147 5148 return SDValue(); 5149 } 5150 5151 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 5152 const SDNode *N, CombineLevel Level) const { 5153 // The following folds are only desirable if `(OP _, c1 << c2)` can be 5154 // materialised in fewer instructions than `(OP _, c1)`: 5155 // 5156 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 5157 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 5158 SDValue N0 = N->getOperand(0); 5159 EVT Ty = N0.getValueType(); 5160 if (Ty.isScalarInteger() && 5161 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 5162 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 5163 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 5164 if (C1 && C2) { 5165 const APInt &C1Int = C1->getAPIntValue(); 5166 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 5167 5168 // We 
can materialise `c1 << c2` into an add immediate, so it's "free", 5169 // and the combine should happen, to potentially allow further combines 5170 // later. 5171 if (ShiftedC1Int.getMinSignedBits() <= 64 && 5172 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 5173 return true; 5174 5175 // We can materialise `c1` in an add immediate, so it's "free", and the 5176 // combine should be prevented. 5177 if (C1Int.getMinSignedBits() <= 64 && 5178 isLegalAddImmediate(C1Int.getSExtValue())) 5179 return false; 5180 5181 // Neither constant will fit into an immediate, so find materialisation 5182 // costs. 5183 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 5184 Subtarget.is64Bit()); 5185 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 5186 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit()); 5187 5188 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 5189 // combine should be prevented. 5190 if (C1Cost < ShiftedC1Cost) 5191 return false; 5192 } 5193 } 5194 return true; 5195 } 5196 5197 bool RISCVTargetLowering::targetShrinkDemandedConstant( 5198 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 5199 TargetLoweringOpt &TLO) const { 5200 // Delay this optimization as late as possible. 5201 if (!TLO.LegalOps) 5202 return false; 5203 5204 EVT VT = Op.getValueType(); 5205 if (VT.isVector()) 5206 return false; 5207 5208 // Only handle AND for now. 5209 if (Op.getOpcode() != ISD::AND) 5210 return false; 5211 5212 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 5213 if (!C) 5214 return false; 5215 5216 const APInt &Mask = C->getAPIntValue(); 5217 5218 // Clear all non-demanded bits initially. 5219 APInt ShrunkMask = Mask & DemandedBits; 5220 5221 // Try to make a smaller immediate by setting undemanded bits. 5222 5223 APInt ExpandedMask = Mask | ~DemandedBits; 5224 5225 auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool { 5226 return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask); 5227 }; 5228 auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool { 5229 if (NewMask == Mask) 5230 return true; 5231 SDLoc DL(Op); 5232 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); 5233 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); 5234 return TLO.CombineTo(Op, NewOp); 5235 }; 5236 5237 // If the shrunk mask fits in sign extended 12 bits, let the target 5238 // independent code apply it. 5239 if (ShrunkMask.isSignedIntN(12)) 5240 return false; 5241 5242 // Preserve (and X, 0xffff) when zext.h is supported. 5243 if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { 5244 APInt NewMask = APInt(Mask.getBitWidth(), 0xffff); 5245 if (IsLegalMask(NewMask)) 5246 return UseMask(NewMask); 5247 } 5248 5249 // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern. 5250 if (VT == MVT::i64) { 5251 APInt NewMask = APInt(64, 0xffffffff); 5252 if (IsLegalMask(NewMask)) 5253 return UseMask(NewMask); 5254 } 5255 5256 // For the remaining optimizations, we need to be able to make a negative 5257 // number through a combination of mask and undemanded bits. 5258 if (!ExpandedMask.isNegative()) 5259 return false; 5260 5261 // What is the fewest number of bits we need to represent the negative number. 5262 unsigned MinSignedBits = ExpandedMask.getMinSignedBits(); 5263 5264 // Try to make a 12 bit negative immediate. If that fails try to make a 32 5265 // bit negative immediate unless the shrunk immediate already fits in 32 bits. 
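// A sign-extended 12-bit mask can be encoded directly in an ANDI; a
// sign-extended 32-bit mask is still cheap to materialise (e.g. LUI plus an
// add immediate).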
5266 APInt NewMask = ShrunkMask; 5267 if (MinSignedBits <= 12) 5268 NewMask.setBitsFrom(11); 5269 else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32)) 5270 NewMask.setBitsFrom(31); 5271 else 5272 return false; 5273 5274 // Sanity check that our new mask is a subset of the demanded mask. 5275 assert(IsLegalMask(NewMask)); 5276 return UseMask(NewMask); 5277 } 5278 5279 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 5280 KnownBits &Known, 5281 const APInt &DemandedElts, 5282 const SelectionDAG &DAG, 5283 unsigned Depth) const { 5284 unsigned BitWidth = Known.getBitWidth(); 5285 unsigned Opc = Op.getOpcode(); 5286 assert((Opc >= ISD::BUILTIN_OP_END || 5287 Opc == ISD::INTRINSIC_WO_CHAIN || 5288 Opc == ISD::INTRINSIC_W_CHAIN || 5289 Opc == ISD::INTRINSIC_VOID) && 5290 "Should use MaskedValueIsZero if you don't know whether Op" 5291 " is a target node!"); 5292 5293 Known.resetAll(); 5294 switch (Opc) { 5295 default: break; 5296 case RISCVISD::SELECT_CC: { 5297 Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1); 5298 // If we don't know any bits, early out. 5299 if (Known.isUnknown()) 5300 break; 5301 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1); 5302 5303 // Only known if known in both the LHS and RHS. 5304 Known = KnownBits::commonBits(Known, Known2); 5305 break; 5306 } 5307 case RISCVISD::REMUW: { 5308 KnownBits Known2; 5309 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 5310 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 5311 // We only care about the lower 32 bits. 5312 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); 5313 // Restore the original width by sign extending. 5314 Known = Known.sext(BitWidth); 5315 break; 5316 } 5317 case RISCVISD::DIVUW: { 5318 KnownBits Known2; 5319 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 5320 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 5321 // We only care about the lower 32 bits. 5322 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); 5323 // Restore the original width by sign extending. 5324 Known = Known.sext(BitWidth); 5325 break; 5326 } 5327 case RISCVISD::CTZW: { 5328 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 5329 unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros(); 5330 unsigned LowBits = Log2_32(PossibleTZ) + 1; 5331 Known.Zero.setBitsFrom(LowBits); 5332 break; 5333 } 5334 case RISCVISD::CLZW: { 5335 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 5336 unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros(); 5337 unsigned LowBits = Log2_32(PossibleLZ) + 1; 5338 Known.Zero.setBitsFrom(LowBits); 5339 break; 5340 } 5341 case RISCVISD::READ_VLENB: 5342 // We assume VLENB is at least 16 bytes. 5343 Known.Zero.setLowBits(4); 5344 break; 5345 } 5346 } 5347 5348 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 5349 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 5350 unsigned Depth) const { 5351 switch (Op.getOpcode()) { 5352 default: 5353 break; 5354 case RISCVISD::SLLW: 5355 case RISCVISD::SRAW: 5356 case RISCVISD::SRLW: 5357 case RISCVISD::DIVW: 5358 case RISCVISD::DIVUW: 5359 case RISCVISD::REMUW: 5360 case RISCVISD::ROLW: 5361 case RISCVISD::RORW: 5362 case RISCVISD::GREVIW: 5363 case RISCVISD::GORCIW: 5364 case RISCVISD::FSLW: 5365 case RISCVISD::FSRW: 5366 // TODO: As the result is sign-extended, this is conservatively correct. 
A 5367 // more precise answer could be calculated for SRAW depending on known 5368 // bits in the shift amount. 5369 return 33; 5370 case RISCVISD::SHFLI: { 5371 // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word 5372 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but 5373 // will stay within the upper 32 bits. If there were more than 32 sign bits 5374 // before there will be at least 33 sign bits after. 5375 if (Op.getValueType() == MVT::i64 && 5376 (Op.getConstantOperandVal(1) & 0x10) == 0) { 5377 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 5378 if (Tmp > 32) 5379 return 33; 5380 } 5381 break; 5382 } 5383 case RISCVISD::VMV_X_S: 5384 // The number of sign bits of the scalar result is computed by obtaining the 5385 // element type of the input vector operand, subtracting its width from the 5386 // XLEN, and then adding one (sign bit within the element type). If the 5387 // element type is wider than XLen, the least-significant XLEN bits are 5388 // taken. 5389 if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) 5390 return 1; 5391 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; 5392 } 5393 5394 return 1; 5395 } 5396 5397 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 5398 MachineBasicBlock *BB) { 5399 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 5400 5401 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 5402 // Should the count have wrapped while it was being read, we need to try 5403 // again. 5404 // ... 5405 // read: 5406 // rdcycleh x3 # load high word of cycle 5407 // rdcycle x2 # load low word of cycle 5408 // rdcycleh x4 # load high word of cycle 5409 // bne x3, x4, read # check if high word reads match, otherwise try again 5410 // ... 5411 5412 MachineFunction &MF = *BB->getParent(); 5413 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5414 MachineFunction::iterator It = ++BB->getIterator(); 5415 5416 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 5417 MF.insert(It, LoopMBB); 5418 5419 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 5420 MF.insert(It, DoneMBB); 5421 5422 // Transfer the remainder of BB and its successor edges to DoneMBB. 
5423 DoneMBB->splice(DoneMBB->begin(), BB, 5424 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 5425 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 5426 5427 BB->addSuccessor(LoopMBB); 5428 5429 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 5430 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 5431 Register LoReg = MI.getOperand(0).getReg(); 5432 Register HiReg = MI.getOperand(1).getReg(); 5433 DebugLoc DL = MI.getDebugLoc(); 5434 5435 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 5436 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 5437 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 5438 .addReg(RISCV::X0); 5439 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 5440 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 5441 .addReg(RISCV::X0); 5442 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 5443 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 5444 .addReg(RISCV::X0); 5445 5446 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 5447 .addReg(HiReg) 5448 .addReg(ReadAgainReg) 5449 .addMBB(LoopMBB); 5450 5451 LoopMBB->addSuccessor(LoopMBB); 5452 LoopMBB->addSuccessor(DoneMBB); 5453 5454 MI.eraseFromParent(); 5455 5456 return DoneMBB; 5457 } 5458 5459 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 5460 MachineBasicBlock *BB) { 5461 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 5462 5463 MachineFunction &MF = *BB->getParent(); 5464 DebugLoc DL = MI.getDebugLoc(); 5465 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 5466 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 5467 Register LoReg = MI.getOperand(0).getReg(); 5468 Register HiReg = MI.getOperand(1).getReg(); 5469 Register SrcReg = MI.getOperand(2).getReg(); 5470 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 5471 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 5472 5473 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 5474 RI); 5475 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 5476 MachineMemOperand *MMOLo = 5477 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 5478 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 5479 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 5480 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 5481 .addFrameIndex(FI) 5482 .addImm(0) 5483 .addMemOperand(MMOLo); 5484 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 5485 .addFrameIndex(FI) 5486 .addImm(4) 5487 .addMemOperand(MMOHi); 5488 MI.eraseFromParent(); // The pseudo instruction is gone now. 
5489 return BB; 5490 } 5491 5492 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 5493 MachineBasicBlock *BB) { 5494 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 5495 "Unexpected instruction"); 5496 5497 MachineFunction &MF = *BB->getParent(); 5498 DebugLoc DL = MI.getDebugLoc(); 5499 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 5500 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 5501 Register DstReg = MI.getOperand(0).getReg(); 5502 Register LoReg = MI.getOperand(1).getReg(); 5503 Register HiReg = MI.getOperand(2).getReg(); 5504 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 5505 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 5506 5507 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 5508 MachineMemOperand *MMOLo = 5509 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 5510 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 5511 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 5512 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 5513 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 5514 .addFrameIndex(FI) 5515 .addImm(0) 5516 .addMemOperand(MMOLo); 5517 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 5518 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 5519 .addFrameIndex(FI) 5520 .addImm(4) 5521 .addMemOperand(MMOHi); 5522 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 5523 MI.eraseFromParent(); // The pseudo instruction is gone now. 5524 return BB; 5525 } 5526 5527 static bool isSelectPseudo(MachineInstr &MI) { 5528 switch (MI.getOpcode()) { 5529 default: 5530 return false; 5531 case RISCV::Select_GPR_Using_CC_GPR: 5532 case RISCV::Select_FPR16_Using_CC_GPR: 5533 case RISCV::Select_FPR32_Using_CC_GPR: 5534 case RISCV::Select_FPR64_Using_CC_GPR: 5535 return true; 5536 } 5537 } 5538 5539 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 5540 MachineBasicBlock *BB) { 5541 // To "insert" Select_* instructions, we actually have to insert the triangle 5542 // control-flow pattern. The incoming instructions know the destination vreg 5543 // to set, the condition code register to branch on, the true/false values to 5544 // select between, and the condcode to use to select the appropriate branch. 5545 // 5546 // We produce the following control flow: 5547 // HeadMBB 5548 // | \ 5549 // | IfFalseMBB 5550 // | / 5551 // TailMBB 5552 // 5553 // When we find a sequence of selects we attempt to optimize their emission 5554 // by sharing the control flow. Currently we only handle cases where we have 5555 // multiple selects with the exact same condition (same LHS, RHS and CC). 5556 // The selects may be interleaved with other instructions if the other 5557 // instructions meet some requirements we deem safe: 5558 // - They are debug instructions. Otherwise, 5559 // - They do not have side-effects, do not access memory and their inputs do 5560 // not depend on the results of the select pseudo-instructions. 5561 // The TrueV/FalseV operands of the selects cannot depend on the result of 5562 // previous selects in the sequence. 5563 // These conditions could be further relaxed. See the X86 target for a 5564 // related approach and more information. 
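// Operand layout of the Select_* pseudos: 0 = dst, 1 = LHS, 2 = RHS,
// 3 = condition code (immediate), 4 = TrueV, 5 = FalseV.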
5565 Register LHS = MI.getOperand(1).getReg(); 5566 Register RHS = MI.getOperand(2).getReg(); 5567 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 5568 5569 SmallVector<MachineInstr *, 4> SelectDebugValues; 5570 SmallSet<Register, 4> SelectDests; 5571 SelectDests.insert(MI.getOperand(0).getReg()); 5572 5573 MachineInstr *LastSelectPseudo = &MI; 5574 5575 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 5576 SequenceMBBI != E; ++SequenceMBBI) { 5577 if (SequenceMBBI->isDebugInstr()) 5578 continue; 5579 else if (isSelectPseudo(*SequenceMBBI)) { 5580 if (SequenceMBBI->getOperand(1).getReg() != LHS || 5581 SequenceMBBI->getOperand(2).getReg() != RHS || 5582 SequenceMBBI->getOperand(3).getImm() != CC || 5583 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 5584 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 5585 break; 5586 LastSelectPseudo = &*SequenceMBBI; 5587 SequenceMBBI->collectDebugValues(SelectDebugValues); 5588 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 5589 } else { 5590 if (SequenceMBBI->hasUnmodeledSideEffects() || 5591 SequenceMBBI->mayLoadOrStore()) 5592 break; 5593 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 5594 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 5595 })) 5596 break; 5597 } 5598 } 5599 5600 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 5601 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5602 DebugLoc DL = MI.getDebugLoc(); 5603 MachineFunction::iterator I = ++BB->getIterator(); 5604 5605 MachineBasicBlock *HeadMBB = BB; 5606 MachineFunction *F = BB->getParent(); 5607 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 5608 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 5609 5610 F->insert(I, IfFalseMBB); 5611 F->insert(I, TailMBB); 5612 5613 // Transfer debug instructions associated with the selects to TailMBB. 5614 for (MachineInstr *DebugInstr : SelectDebugValues) { 5615 TailMBB->push_back(DebugInstr->removeFromParent()); 5616 } 5617 5618 // Move all instructions after the sequence to TailMBB. 5619 TailMBB->splice(TailMBB->end(), HeadMBB, 5620 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 5621 // Update machine-CFG edges by transferring all successors of the current 5622 // block to the new block which will contain the Phi nodes for the selects. 5623 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 5624 // Set the successors for HeadMBB. 5625 HeadMBB->addSuccessor(IfFalseMBB); 5626 HeadMBB->addSuccessor(TailMBB); 5627 5628 // Insert appropriate branch. 5629 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 5630 5631 BuildMI(HeadMBB, DL, TII.get(Opcode)) 5632 .addReg(LHS) 5633 .addReg(RHS) 5634 .addMBB(TailMBB); 5635 5636 // IfFalseMBB just falls through to TailMBB. 5637 IfFalseMBB->addSuccessor(TailMBB); 5638 5639 // Create PHIs for all of the select pseudo-instructions. 
5640 auto SelectMBBI = MI.getIterator(); 5641 auto SelectEnd = std::next(LastSelectPseudo->getIterator()); 5642 auto InsertionPoint = TailMBB->begin(); 5643 while (SelectMBBI != SelectEnd) { 5644 auto Next = std::next(SelectMBBI); 5645 if (isSelectPseudo(*SelectMBBI)) { 5646 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 5647 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), 5648 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) 5649 .addReg(SelectMBBI->getOperand(4).getReg()) 5650 .addMBB(HeadMBB) 5651 .addReg(SelectMBBI->getOperand(5).getReg()) 5652 .addMBB(IfFalseMBB); 5653 SelectMBBI->eraseFromParent(); 5654 } 5655 SelectMBBI = Next; 5656 } 5657 5658 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); 5659 return TailMBB; 5660 } 5661 5662 static MachineInstr *elideCopies(MachineInstr *MI, 5663 const MachineRegisterInfo &MRI) { 5664 while (true) { 5665 if (!MI->isFullCopy()) 5666 return MI; 5667 if (!Register::isVirtualRegister(MI->getOperand(1).getReg())) 5668 return nullptr; 5669 MI = MRI.getVRegDef(MI->getOperand(1).getReg()); 5670 if (!MI) 5671 return nullptr; 5672 } 5673 } 5674 5675 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB, 5676 int VLIndex, unsigned SEWIndex, 5677 RISCVVLMUL VLMul, bool ForceTailAgnostic) { 5678 MachineFunction &MF = *BB->getParent(); 5679 DebugLoc DL = MI.getDebugLoc(); 5680 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 5681 5682 unsigned SEW = MI.getOperand(SEWIndex).getImm(); 5683 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); 5684 RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8)); 5685 5686 MachineRegisterInfo &MRI = MF.getRegInfo(); 5687 5688 auto BuildVSETVLI = [&]() { 5689 if (VLIndex >= 0) { 5690 Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass); 5691 Register VLReg = MI.getOperand(VLIndex).getReg(); 5692 5693 // VL might be a compile time constant, but isel would have to put it 5694 // in a register. See if VL comes from an ADDI X0, imm. 5695 if (VLReg.isVirtual()) { 5696 MachineInstr *Def = MRI.getVRegDef(VLReg); 5697 if (Def && Def->getOpcode() == RISCV::ADDI && 5698 Def->getOperand(1).getReg() == RISCV::X0 && 5699 Def->getOperand(2).isImm()) { 5700 uint64_t Imm = Def->getOperand(2).getImm(); 5701 // VSETIVLI allows a 5-bit zero extended immediate. 5702 if (isUInt<5>(Imm)) 5703 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI)) 5704 .addReg(DestReg, RegState::Define | RegState::Dead) 5705 .addImm(Imm); 5706 } 5707 } 5708 5709 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 5710 .addReg(DestReg, RegState::Define | RegState::Dead) 5711 .addReg(VLReg); 5712 } 5713 5714 // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0). 5715 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 5716 .addReg(RISCV::X0, RegState::Define | RegState::Dead) 5717 .addReg(RISCV::X0, RegState::Kill); 5718 }; 5719 5720 MachineInstrBuilder MIB = BuildVSETVLI(); 5721 5722 // Default to tail agnostic unless the destination is tied to a source. In 5723 // that case the user would have some control over the tail values. The tail 5724 // policy is also ignored on instructions that only update element 0 like 5725 // vmv.s.x or reductions so use agnostic there to match the common case. 5726 // FIXME: This is conservatively correct, but we might want to detect that 5727 // the input is undefined. 
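  // A tied destination usually corresponds to a merge operand (e.g. on masked
  // pseudos) whose tail elements must be preserved, so default to tail
  // undisturbed in that case unless the merge value traces back to an
  // IMPLICIT_DEF.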
5728   bool TailAgnostic = true;
5729   unsigned UseOpIdx;
5730   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
5731     TailAgnostic = false;
5732     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
5733     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
5734     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
5735     if (UseMI) {
5736       UseMI = elideCopies(UseMI, MRI);
5737       if (UseMI && UseMI->isImplicitDef())
5738         TailAgnostic = true;
5739     }
5740   }
5741 
5742   // For simplicity we reuse the vtype representation here.
5743   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
5744                                      /*TailAgnostic*/ TailAgnostic,
5745                                      /*MaskAgnostic*/ false));
5746 
5747   // Remove (now) redundant operands from pseudo
5748   if (VLIndex >= 0) {
5749     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
5750     MI.getOperand(VLIndex).setIsKill(false);
5751   }
5752 
5753   return BB;
5754 }
5755 
5756 MachineBasicBlock *
5757 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
5758                                                  MachineBasicBlock *BB) const {
5759   uint64_t TSFlags = MI.getDesc().TSFlags;
5760 
5761   if (TSFlags & RISCVII::HasSEWOpMask) {
5762     unsigned NumOperands = MI.getNumExplicitOperands();
5763     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
5764     unsigned SEWIndex = NumOperands - 1;
5765     bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
5766 
5767     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
5768                                                RISCVII::VLMulShift);
5769     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
5770   }
5771 
5772   switch (MI.getOpcode()) {
5773   default:
5774     llvm_unreachable("Unexpected instr type to insert");
5775   case RISCV::ReadCycleWide:
5776     assert(!Subtarget.is64Bit() &&
5777            "ReadCycleWide is only to be used on riscv32");
5778     return emitReadCycleWidePseudo(MI, BB);
5779   case RISCV::Select_GPR_Using_CC_GPR:
5780   case RISCV::Select_FPR16_Using_CC_GPR:
5781   case RISCV::Select_FPR32_Using_CC_GPR:
5782   case RISCV::Select_FPR64_Using_CC_GPR:
5783     return emitSelectPseudo(MI, BB);
5784   case RISCV::BuildPairF64Pseudo:
5785     return emitBuildPairF64Pseudo(MI, BB);
5786   case RISCV::SplitF64Pseudo:
5787     return emitSplitF64Pseudo(MI, BB);
5788   }
5789 }
5790 
5791 // Calling Convention Implementation.
5792 // The expectations for frontend ABI lowering vary from target to target.
5793 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
5794 // details, but this is a longer term goal. For now, we simply try to keep the
5795 // role of the frontend as simple and well-defined as possible. The rules can
5796 // be summarised as:
5797 // * Never split up large scalar arguments. We handle them here.
5798 // * If a hardfloat calling convention is being used, and the struct may be
5799 // passed in a pair of registers (fp+fp, int+fp), and both registers are
5800 // available, then pass as two separate arguments. If either the GPRs or FPRs
5801 // are exhausted, then pass according to the rule below.
5802 // * If a struct could never be passed in registers or directly in a stack
5803 // slot (as it is larger than 2*XLEN and the floating point rules don't
5804 // apply), then pass it using a pointer with the byval attribute.
5805 // * If a struct is less than 2*XLEN, then coerce to either a two-element
5806 // word-sized array or a 2*XLEN scalar (depending on alignment).
5807 // * The frontend can determine whether a struct is returned by reference or
5808 // not based on its size and fields.
If it will be returned by reference, the 5809 // frontend must modify the prototype so a pointer with the sret annotation is 5810 // passed as the first argument. This is not necessary for large scalar 5811 // returns. 5812 // * Struct return values and varargs should be coerced to structs containing 5813 // register-size fields in the same situations they would be for fixed 5814 // arguments. 5815 5816 static const MCPhysReg ArgGPRs[] = { 5817 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 5818 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 5819 }; 5820 static const MCPhysReg ArgFPR16s[] = { 5821 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 5822 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 5823 }; 5824 static const MCPhysReg ArgFPR32s[] = { 5825 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 5826 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 5827 }; 5828 static const MCPhysReg ArgFPR64s[] = { 5829 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 5830 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 5831 }; 5832 // This is an interim calling convention and it may be changed in the future. 5833 static const MCPhysReg ArgVRs[] = { 5834 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, 5835 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, 5836 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; 5837 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, 5838 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, 5839 RISCV::V20M2, RISCV::V22M2}; 5840 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, 5841 RISCV::V20M4}; 5842 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; 5843 5844 // Pass a 2*XLEN argument that has been split into two XLEN values through 5845 // registers or the stack as necessary. 5846 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 5847 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 5848 MVT ValVT2, MVT LocVT2, 5849 ISD::ArgFlagsTy ArgFlags2) { 5850 unsigned XLenInBytes = XLen / 8; 5851 if (Register Reg = State.AllocateReg(ArgGPRs)) { 5852 // At least one half can be passed via register. 5853 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 5854 VA1.getLocVT(), CCValAssign::Full)); 5855 } else { 5856 // Both halves must be passed on the stack, with proper alignment. 5857 Align StackAlign = 5858 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 5859 State.addLoc( 5860 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 5861 State.AllocateStack(XLenInBytes, StackAlign), 5862 VA1.getLocVT(), CCValAssign::Full)); 5863 State.addLoc(CCValAssign::getMem( 5864 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 5865 LocVT2, CCValAssign::Full)); 5866 return false; 5867 } 5868 5869 if (Register Reg = State.AllocateReg(ArgGPRs)) { 5870 // The second half can also be passed via register. 5871 State.addLoc( 5872 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 5873 } else { 5874 // The second half is passed via the stack, without additional alignment. 5875 State.addLoc(CCValAssign::getMem( 5876 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 5877 LocVT2, CCValAssign::Full)); 5878 } 5879 5880 return false; 5881 } 5882 5883 // Implements the RISC-V calling convention. Returns true upon failure. 
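// IsFixed is false for arguments passed in the variadic portion of a call,
// OrigTy is the original IR type of the argument (used for the 2*XLEN
// register-alignment rule for varargs), and FirstMaskArgument, when present,
// is the index of the first mask (i1-element vector) argument, which is
// assigned to V0.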
5884 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
5885                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
5886                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
5887                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
5888                      Optional<unsigned> FirstMaskArgument) {
5889   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
5890   assert(XLen == 32 || XLen == 64);
5891   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
5892 
5893   // Any return value split into more than two values can't be returned
5894   // directly. Vectors are returned via the available vector registers.
5895   if (!LocVT.isVector() && IsRet && ValNo > 1)
5896     return true;
5897 
5898   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
5899   // variadic argument, or if no F16/F32 argument registers are available.
5900   bool UseGPRForF16_F32 = true;
5901   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
5902   // variadic argument, or if no F64 argument registers are available.
5903   bool UseGPRForF64 = true;
5904 
5905   switch (ABI) {
5906   default:
5907     llvm_unreachable("Unexpected ABI");
5908   case RISCVABI::ABI_ILP32:
5909   case RISCVABI::ABI_LP64:
5910     break;
5911   case RISCVABI::ABI_ILP32F:
5912   case RISCVABI::ABI_LP64F:
5913     UseGPRForF16_F32 = !IsFixed;
5914     break;
5915   case RISCVABI::ABI_ILP32D:
5916   case RISCVABI::ABI_LP64D:
5917     UseGPRForF16_F32 = !IsFixed;
5918     UseGPRForF64 = !IsFixed;
5919     break;
5920   }
5921 
5922   // FPR16, FPR32, and FPR64 alias each other.
5923   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
5924     UseGPRForF16_F32 = true;
5925     UseGPRForF64 = true;
5926   }
5927 
5928   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
5929   // similar local variables rather than directly checking against the target
5930   // ABI.
5931 
5932   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
5933     LocVT = XLenVT;
5934     LocInfo = CCValAssign::BCvt;
5935   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
5936     LocVT = MVT::i64;
5937     LocInfo = CCValAssign::BCvt;
5938   }
5939 
5940   // If this is a variadic argument, the RISC-V calling convention requires
5941   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
5942   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
5943   // be used regardless of whether the original argument was split during
5944   // legalisation or not. The argument will not be passed by registers if the
5945   // original type is larger than 2*XLEN, so the register alignment rule does
5946   // not apply.
5947   unsigned TwoXLenInBytes = (2 * XLen) / 8;
5948   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
5949       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
5950     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
5951     // Skip 'odd' register if necessary.
5952     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
5953       State.AllocateReg(ArgGPRs);
5954   }
5955 
5956   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
5957   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
5958       State.getPendingArgFlags();
5959 
5960   assert(PendingLocs.size() == PendingArgFlags.size() &&
5961          "PendingLocs and PendingArgFlags out of sync");
5962 
5963   // Handle passing f64 on RV32D with a soft float ABI or when floating point
5964   // registers are exhausted.
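  // As an illustration only: with the ilp32 ABI the two halves of an f64
  // argument normally occupy a pair of GPRs such as a0/a1; if only a7 is
  // left, the low half goes in a7 and the high half on the stack; if no GPRs
  // remain, the whole value is passed on the stack.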
5965 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 5966 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 5967 "Can't lower f64 if it is split"); 5968 // Depending on available argument GPRS, f64 may be passed in a pair of 5969 // GPRs, split between a GPR and the stack, or passed completely on the 5970 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 5971 // cases. 5972 Register Reg = State.AllocateReg(ArgGPRs); 5973 LocVT = MVT::i32; 5974 if (!Reg) { 5975 unsigned StackOffset = State.AllocateStack(8, Align(8)); 5976 State.addLoc( 5977 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 5978 return false; 5979 } 5980 if (!State.AllocateReg(ArgGPRs)) 5981 State.AllocateStack(4, Align(4)); 5982 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5983 return false; 5984 } 5985 5986 // Fixed-length vectors are located in the corresponding scalable-vector 5987 // container types. 5988 if (ValVT.isFixedLengthVector()) 5989 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 5990 5991 // Split arguments might be passed indirectly, so keep track of the pending 5992 // values. Split vectors are passed via a mix of registers and indirectly, so 5993 // treat them as we would any other argument. 5994 if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) { 5995 LocVT = XLenVT; 5996 LocInfo = CCValAssign::Indirect; 5997 PendingLocs.push_back( 5998 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 5999 PendingArgFlags.push_back(ArgFlags); 6000 if (!ArgFlags.isSplitEnd()) { 6001 return false; 6002 } 6003 } 6004 6005 // If the split argument only had two elements, it should be passed directly 6006 // in registers or on the stack. 6007 if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 6008 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 6009 // Apply the normal calling convention rules to the first half of the 6010 // split argument. 6011 CCValAssign VA = PendingLocs[0]; 6012 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 6013 PendingLocs.clear(); 6014 PendingArgFlags.clear(); 6015 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 6016 ArgFlags); 6017 } 6018 6019 // Allocate to a register if possible, or else a stack slot. 6020 Register Reg; 6021 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 6022 Reg = State.AllocateReg(ArgFPR16s); 6023 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 6024 Reg = State.AllocateReg(ArgFPR32s); 6025 else if (ValVT == MVT::f64 && !UseGPRForF64) 6026 Reg = State.AllocateReg(ArgFPR64s); 6027 else if (ValVT.isVector()) { 6028 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); 6029 if (RC == &RISCV::VRRegClass) { 6030 // Assign the first mask argument to V0. 6031 // This is an interim calling convention and it may be changed in the 6032 // future. 6033 if (FirstMaskArgument.hasValue() && 6034 ValNo == FirstMaskArgument.getValue()) { 6035 Reg = State.AllocateReg(RISCV::V0); 6036 } else { 6037 Reg = State.AllocateReg(ArgVRs); 6038 } 6039 } else if (RC == &RISCV::VRM2RegClass) { 6040 Reg = State.AllocateReg(ArgVRM2s); 6041 } else if (RC == &RISCV::VRM4RegClass) { 6042 Reg = State.AllocateReg(ArgVRM4s); 6043 } else if (RC == &RISCV::VRM8RegClass) { 6044 Reg = State.AllocateReg(ArgVRM8s); 6045 } else { 6046 llvm_unreachable("Unhandled class register for ValueType"); 6047 } 6048 if (!Reg) { 6049 // For return values, the vector must be passed fully via registers or 6050 // via the stack. 
6051 // FIXME: The proposed vector ABI only mandates v8-v15 for return values, 6052 // but we're using all of them. 6053 if (IsRet) 6054 return true; 6055 LocInfo = CCValAssign::Indirect; 6056 // Try using a GPR to pass the address 6057 Reg = State.AllocateReg(ArgGPRs); 6058 LocVT = XLenVT; 6059 } 6060 } else 6061 Reg = State.AllocateReg(ArgGPRs); 6062 unsigned StackOffset = 6063 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); 6064 6065 // If we reach this point and PendingLocs is non-empty, we must be at the 6066 // end of a split argument that must be passed indirectly. 6067 if (!PendingLocs.empty()) { 6068 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 6069 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 6070 6071 for (auto &It : PendingLocs) { 6072 if (Reg) 6073 It.convertToReg(Reg); 6074 else 6075 It.convertToMem(StackOffset); 6076 State.addLoc(It); 6077 } 6078 PendingLocs.clear(); 6079 PendingArgFlags.clear(); 6080 return false; 6081 } 6082 6083 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || 6084 (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) && 6085 "Expected an XLenVT or vector types at this stage"); 6086 6087 if (Reg) { 6088 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6089 return false; 6090 } 6091 6092 // When a floating-point value is passed on the stack, no bit-conversion is 6093 // needed. 6094 if (ValVT.isFloatingPoint()) { 6095 LocVT = ValVT; 6096 LocInfo = CCValAssign::Full; 6097 } 6098 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 6099 return false; 6100 } 6101 6102 template <typename ArgTy> 6103 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 6104 for (const auto &ArgIdx : enumerate(Args)) { 6105 MVT ArgVT = ArgIdx.value().VT; 6106 if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) 6107 return ArgIdx.index(); 6108 } 6109 return None; 6110 } 6111 6112 void RISCVTargetLowering::analyzeInputArgs( 6113 MachineFunction &MF, CCState &CCInfo, 6114 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 6115 unsigned NumArgs = Ins.size(); 6116 FunctionType *FType = MF.getFunction().getFunctionType(); 6117 6118 Optional<unsigned> FirstMaskArgument; 6119 if (Subtarget.hasStdExtV()) 6120 FirstMaskArgument = preAssignMask(Ins); 6121 6122 for (unsigned i = 0; i != NumArgs; ++i) { 6123 MVT ArgVT = Ins[i].VT; 6124 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 6125 6126 Type *ArgTy = nullptr; 6127 if (IsRet) 6128 ArgTy = FType->getReturnType(); 6129 else if (Ins[i].isOrigArg()) 6130 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 6131 6132 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 6133 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 6134 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 6135 FirstMaskArgument)) { 6136 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 6137 << EVT(ArgVT).getEVTString() << '\n'); 6138 llvm_unreachable(nullptr); 6139 } 6140 } 6141 } 6142 6143 void RISCVTargetLowering::analyzeOutputArgs( 6144 MachineFunction &MF, CCState &CCInfo, 6145 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 6146 CallLoweringInfo *CLI) const { 6147 unsigned NumArgs = Outs.size(); 6148 6149 Optional<unsigned> FirstMaskArgument; 6150 if (Subtarget.hasStdExtV()) 6151 FirstMaskArgument = preAssignMask(Outs); 6152 6153 for (unsigned i = 0; i != NumArgs; i++) { 6154 MVT ArgVT = Outs[i].VT; 6155 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 6156 
Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 6157 6158 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 6159 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 6160 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 6161 FirstMaskArgument)) { 6162 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 6163 << EVT(ArgVT).getEVTString() << "\n"); 6164 llvm_unreachable(nullptr); 6165 } 6166 } 6167 } 6168 6169 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 6170 // values. 6171 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 6172 const CCValAssign &VA, const SDLoc &DL, 6173 const RISCVSubtarget &Subtarget) { 6174 switch (VA.getLocInfo()) { 6175 default: 6176 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 6177 case CCValAssign::Full: 6178 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector()) 6179 Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget); 6180 break; 6181 case CCValAssign::BCvt: 6182 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 6183 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 6184 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 6185 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 6186 else 6187 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 6188 break; 6189 } 6190 return Val; 6191 } 6192 6193 // The caller is responsible for loading the full value if the argument is 6194 // passed with CCValAssign::Indirect. 6195 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 6196 const CCValAssign &VA, const SDLoc &DL, 6197 const RISCVTargetLowering &TLI) { 6198 MachineFunction &MF = DAG.getMachineFunction(); 6199 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 6200 EVT LocVT = VA.getLocVT(); 6201 SDValue Val; 6202 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 6203 Register VReg = RegInfo.createVirtualRegister(RC); 6204 RegInfo.addLiveIn(VA.getLocReg(), VReg); 6205 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 6206 6207 if (VA.getLocInfo() == CCValAssign::Indirect) 6208 return Val; 6209 6210 return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget()); 6211 } 6212 6213 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 6214 const CCValAssign &VA, const SDLoc &DL, 6215 const RISCVSubtarget &Subtarget) { 6216 EVT LocVT = VA.getLocVT(); 6217 6218 switch (VA.getLocInfo()) { 6219 default: 6220 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 6221 case CCValAssign::Full: 6222 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector()) 6223 Val = convertToScalableVector(LocVT, Val, DAG, Subtarget); 6224 break; 6225 case CCValAssign::BCvt: 6226 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 6227 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 6228 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 6229 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 6230 else 6231 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 6232 break; 6233 } 6234 return Val; 6235 } 6236 6237 // The caller is responsible for loading the full value if the argument is 6238 // passed with CCValAssign::Indirect. 
6239 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
6240                                 const CCValAssign &VA, const SDLoc &DL) {
6241   MachineFunction &MF = DAG.getMachineFunction();
6242   MachineFrameInfo &MFI = MF.getFrameInfo();
6243   EVT LocVT = VA.getLocVT();
6244   EVT ValVT = VA.getValVT();
6245   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
6246   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
6247                                  VA.getLocMemOffset(), /*Immutable=*/true);
6248   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6249   SDValue Val;
6250 
6251   ISD::LoadExtType ExtType;
6252   switch (VA.getLocInfo()) {
6253   default:
6254     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6255   case CCValAssign::Full:
6256   case CCValAssign::Indirect:
6257   case CCValAssign::BCvt:
6258     ExtType = ISD::NON_EXTLOAD;
6259     break;
6260   }
6261   Val = DAG.getExtLoad(
6262       ExtType, DL, LocVT, Chain, FIN,
6263       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
6264   return Val;
6265 }
6266 
6267 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
6268                                        const CCValAssign &VA, const SDLoc &DL) {
6269   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
6270          "Unexpected VA");
6271   MachineFunction &MF = DAG.getMachineFunction();
6272   MachineFrameInfo &MFI = MF.getFrameInfo();
6273   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6274 
6275   if (VA.isMemLoc()) {
6276     // f64 is passed on the stack.
6277     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
6278     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6279     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
6280                        MachinePointerInfo::getFixedStack(MF, FI));
6281   }
6282 
6283   assert(VA.isRegLoc() && "Expected register VA assignment");
6284 
6285   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6286   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
6287   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
6288   SDValue Hi;
6289   if (VA.getLocReg() == RISCV::X17) {
6290     // Second half of f64 is passed on the stack.
6291     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
6292     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6293     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
6294                      MachinePointerInfo::getFixedStack(MF, FI));
6295   } else {
6296     // Second half of f64 is passed in another GPR.
6297     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6298     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
6299     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
6300   }
6301   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
6302 }
6303 
6304 // FastCC has less than a 1% performance improvement on some particular
6305 // benchmarks, but in theory it may benefit other cases.
6306 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
6307                             CCValAssign::LocInfo LocInfo,
6308                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
6309 
6310   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
6311     // X5 and X6 might be used for save-restore libcall.
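    // The list below is a0-a7 followed by t2-t6 (x7, x28-x31); t0 and t1 are
    // deliberately omitted for the reason above.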
6312 static const MCPhysReg GPRList[] = { 6313 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, 6314 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, 6315 RISCV::X29, RISCV::X30, RISCV::X31}; 6316 if (unsigned Reg = State.AllocateReg(GPRList)) { 6317 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6318 return false; 6319 } 6320 } 6321 6322 if (LocVT == MVT::f16) { 6323 static const MCPhysReg FPR16List[] = { 6324 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, 6325 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, 6326 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, 6327 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; 6328 if (unsigned Reg = State.AllocateReg(FPR16List)) { 6329 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6330 return false; 6331 } 6332 } 6333 6334 if (LocVT == MVT::f32) { 6335 static const MCPhysReg FPR32List[] = { 6336 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, 6337 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, 6338 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, 6339 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; 6340 if (unsigned Reg = State.AllocateReg(FPR32List)) { 6341 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6342 return false; 6343 } 6344 } 6345 6346 if (LocVT == MVT::f64) { 6347 static const MCPhysReg FPR64List[] = { 6348 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, 6349 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, 6350 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, 6351 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; 6352 if (unsigned Reg = State.AllocateReg(FPR64List)) { 6353 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6354 return false; 6355 } 6356 } 6357 6358 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 6359 unsigned Offset4 = State.AllocateStack(4, Align(4)); 6360 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 6361 return false; 6362 } 6363 6364 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 6365 unsigned Offset5 = State.AllocateStack(8, Align(8)); 6366 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 6367 return false; 6368 } 6369 6370 return true; // CC didn't match. 6371 } 6372 6373 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 6374 CCValAssign::LocInfo LocInfo, 6375 ISD::ArgFlagsTy ArgFlags, CCState &State) { 6376 6377 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 6378 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 6379 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 6380 static const MCPhysReg GPRList[] = { 6381 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 6382 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 6383 if (unsigned Reg = State.AllocateReg(GPRList)) { 6384 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 6385 return false; 6386 } 6387 } 6388 6389 if (LocVT == MVT::f32) { 6390 // Pass in STG registers: F1, ..., F6 6391 // fs0 ... 
fs5
6392     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
6393                                           RISCV::F18_F, RISCV::F19_F,
6394                                           RISCV::F20_F, RISCV::F21_F};
6395     if (unsigned Reg = State.AllocateReg(FPR32List)) {
6396       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6397       return false;
6398     }
6399   }
6400 
6401   if (LocVT == MVT::f64) {
6402     // Pass in STG registers: D1, ..., D6
6403     //                        fs6 ... fs11
6404     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
6405                                           RISCV::F24_D, RISCV::F25_D,
6406                                           RISCV::F26_D, RISCV::F27_D};
6407     if (unsigned Reg = State.AllocateReg(FPR64List)) {
6408       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6409       return false;
6410     }
6411   }
6412 
6413   report_fatal_error("No registers left in GHC calling convention");
6414   return true;
6415 }
6416 
6417 // Transform physical registers into virtual registers.
6418 SDValue RISCVTargetLowering::LowerFormalArguments(
6419     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
6420     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6421     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6422 
6423   MachineFunction &MF = DAG.getMachineFunction();
6424 
6425   switch (CallConv) {
6426   default:
6427     report_fatal_error("Unsupported calling convention");
6428   case CallingConv::C:
6429   case CallingConv::Fast:
6430     break;
6431   case CallingConv::GHC:
6432     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
6433         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
6434       report_fatal_error(
6435           "GHC calling convention requires the F and D instruction set extensions");
6436   }
6437 
6438   const Function &Func = MF.getFunction();
6439   if (Func.hasFnAttribute("interrupt")) {
6440     if (!Func.arg_empty())
6441       report_fatal_error(
6442           "Functions with the interrupt attribute cannot have arguments!");
6443 
6444     StringRef Kind =
6445         MF.getFunction().getFnAttribute("interrupt").getValueAsString();
6446 
6447     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
6448       report_fatal_error(
6449           "Function interrupt attribute argument not supported!");
6450   }
6451 
6452   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6453   MVT XLenVT = Subtarget.getXLenVT();
6454   unsigned XLenInBytes = Subtarget.getXLen() / 8;
6455   // Used with varargs to accumulate store chains.
6456   std::vector<SDValue> OutChains;
6457 
6458   // Assign locations to all of the incoming arguments.
6459   SmallVector<CCValAssign, 16> ArgLocs;
6460   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6461 
6462   if (CallConv == CallingConv::Fast)
6463     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
6464   else if (CallConv == CallingConv::GHC)
6465     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
6466   else
6467     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
6468 
6469   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
6470     CCValAssign &VA = ArgLocs[i];
6471     SDValue ArgValue;
6472     // Passing f64 on RV32D with a soft float ABI must be handled as a special
6473     // case.
6474     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
6475       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
6476     else if (VA.isRegLoc())
6477       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
6478     else
6479       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
6480 
6481     if (VA.getLocInfo() == CCValAssign::Indirect) {
6482       // If the original argument was split and passed by reference (e.g. i128
6483       // on RV32), we need to load all parts of it here (using the same
6484       // address). Vectors may be partly split to registers and partly to the
6485       // stack, in which case the base address is partly offset and subsequent
6486       // stores are relative to that.
6487       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
6488                                    MachinePointerInfo()));
6489       unsigned ArgIndex = Ins[i].OrigArgIndex;
6490       unsigned ArgPartOffset = Ins[i].PartOffset;
6491       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
6492       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
6493         CCValAssign &PartVA = ArgLocs[i + 1];
6494         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
6495         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
6496                                       DAG.getIntPtrConstant(PartOffset, DL));
6497         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
6498                                      MachinePointerInfo()));
6499         ++i;
6500       }
6501       continue;
6502     }
6503     InVals.push_back(ArgValue);
6504   }
6505 
6506   if (IsVarArg) {
6507     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
6508     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
6509     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
6510     MachineFrameInfo &MFI = MF.getFrameInfo();
6511     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6512     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
6513 
6514     // Offset of the first variable argument from stack pointer, and size of
6515     // the vararg save area. For now, the varargs save area is either zero or
6516     // large enough to hold a0-a7.
6517     int VaArgOffset, VarArgsSaveSize;
6518 
6519     // If all registers are allocated, then all varargs must be passed on the
6520     // stack and we don't need to save any argregs.
6521     if (ArgRegs.size() == Idx) {
6522       VaArgOffset = CCInfo.getNextStackOffset();
6523       VarArgsSaveSize = 0;
6524     } else {
6525       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
6526       VaArgOffset = -VarArgsSaveSize;
6527     }
6528 
6529     // Record the frame index of the first variable argument,
6530     // which is needed when lowering VASTART.
6531     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
6532     RVFI->setVarArgsFrameIndex(FI);
6533 
6534     // If saving an odd number of registers then create an extra stack slot to
6535     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
6536     // offsets to even-numbered registers remain 2*XLEN-aligned.
6537     if (Idx % 2) {
6538       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
6539       VarArgsSaveSize += XLenInBytes;
6540     }
6541 
6542     // Copy the integer registers that may have been used for passing varargs
6543     // to the vararg save area.
6544     for (unsigned I = Idx; I < ArgRegs.size();
6545          ++I, VaArgOffset += XLenInBytes) {
6546       const Register Reg = RegInfo.createVirtualRegister(RC);
6547       RegInfo.addLiveIn(ArgRegs[I], Reg);
6548       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
6549       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
6550       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
6551       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
6552                                    MachinePointerInfo::getFixedStack(MF, FI));
6553       cast<StoreSDNode>(Store.getNode())
6554           ->getMemOperand()
6555           ->setValue((Value *)nullptr);
6556       OutChains.push_back(Store);
6557     }
6558     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
6559   }
6560 
6561   // All stores are grouped in one node to allow the matching between
6562   // the size of Ins and InVals. This only happens for vararg functions.
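  // The original Chain is included as an operand of the TokenFactor so that
  // users of the returned chain also depend on the vararg register spills
  // above.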
6563 if (!OutChains.empty()) { 6564 OutChains.push_back(Chain); 6565 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); 6566 } 6567 6568 return Chain; 6569 } 6570 6571 /// isEligibleForTailCallOptimization - Check whether the call is eligible 6572 /// for tail call optimization. 6573 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. 6574 bool RISCVTargetLowering::isEligibleForTailCallOptimization( 6575 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, 6576 const SmallVector<CCValAssign, 16> &ArgLocs) const { 6577 6578 auto &Callee = CLI.Callee; 6579 auto CalleeCC = CLI.CallConv; 6580 auto &Outs = CLI.Outs; 6581 auto &Caller = MF.getFunction(); 6582 auto CallerCC = Caller.getCallingConv(); 6583 6584 // Exception-handling functions need a special set of instructions to 6585 // indicate a return to the hardware. Tail-calling another function would 6586 // probably break this. 6587 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This 6588 // should be expanded as new function attributes are introduced. 6589 if (Caller.hasFnAttribute("interrupt")) 6590 return false; 6591 6592 // Do not tail call opt if the stack is used to pass parameters. 6593 if (CCInfo.getNextStackOffset() != 0) 6594 return false; 6595 6596 // Do not tail call opt if any parameters need to be passed indirectly. 6597 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 6598 // passed indirectly. So the address of the value will be passed in a 6599 // register, or if not available, then the address is put on the stack. In 6600 // order to pass indirectly, space on the stack often needs to be allocated 6601 // in order to store the value. In this case the CCInfo.getNextStackOffset() 6602 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 6603 // are passed CCValAssign::Indirect. 6604 for (auto &VA : ArgLocs) 6605 if (VA.getLocInfo() == CCValAssign::Indirect) 6606 return false; 6607 6608 // Do not tail call opt if either caller or callee uses struct return 6609 // semantics. 6610 auto IsCallerStructRet = Caller.hasStructRetAttr(); 6611 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 6612 if (IsCallerStructRet || IsCalleeStructRet) 6613 return false; 6614 6615 // Externally-defined functions with weak linkage should not be 6616 // tail-called. The behaviour of branch instructions in this situation (as 6617 // used for tail calls) is implementation-defined, so we cannot rely on the 6618 // linker replacing the tail call with a return. 6619 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 6620 const GlobalValue *GV = G->getGlobal(); 6621 if (GV->hasExternalWeakLinkage()) 6622 return false; 6623 } 6624 6625 // The callee has to preserve all registers the caller needs to preserve. 6626 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6627 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 6628 if (CalleeCC != CallerCC) { 6629 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 6630 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 6631 return false; 6632 } 6633 6634 // Byval parameters hand the function a pointer directly into the stack area 6635 // we want to reuse during a tail call. Working around this *is* possible 6636 // but less efficient and uglier in LowerCall. 
6637 for (auto &Arg : Outs) 6638 if (Arg.Flags.isByVal()) 6639 return false; 6640 6641 return true; 6642 } 6643 6644 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 6645 // and output parameter nodes. 6646 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 6647 SmallVectorImpl<SDValue> &InVals) const { 6648 SelectionDAG &DAG = CLI.DAG; 6649 SDLoc &DL = CLI.DL; 6650 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 6651 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 6652 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 6653 SDValue Chain = CLI.Chain; 6654 SDValue Callee = CLI.Callee; 6655 bool &IsTailCall = CLI.IsTailCall; 6656 CallingConv::ID CallConv = CLI.CallConv; 6657 bool IsVarArg = CLI.IsVarArg; 6658 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 6659 MVT XLenVT = Subtarget.getXLenVT(); 6660 6661 MachineFunction &MF = DAG.getMachineFunction(); 6662 6663 // Analyze the operands of the call, assigning locations to each operand. 6664 SmallVector<CCValAssign, 16> ArgLocs; 6665 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 6666 6667 if (CallConv == CallingConv::Fast) 6668 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); 6669 else if (CallConv == CallingConv::GHC) 6670 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 6671 else 6672 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 6673 6674 // Check if it's really possible to do a tail call. 6675 if (IsTailCall) 6676 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 6677 6678 if (IsTailCall) 6679 ++NumTailCalls; 6680 else if (CLI.CB && CLI.CB->isMustTailCall()) 6681 report_fatal_error("failed to perform tail call elimination on a call " 6682 "site marked musttail"); 6683 6684 // Get a count of how many bytes are to be pushed on the stack. 6685 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 6686 6687 // Create local copies for byval args 6688 SmallVector<SDValue, 8> ByValArgs; 6689 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 6690 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6691 if (!Flags.isByVal()) 6692 continue; 6693 6694 SDValue Arg = OutVals[i]; 6695 unsigned Size = Flags.getByValSize(); 6696 Align Alignment = Flags.getNonZeroByValAlign(); 6697 6698 int FI = 6699 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 6700 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 6701 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 6702 6703 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 6704 /*IsVolatile=*/false, 6705 /*AlwaysInline=*/false, IsTailCall, 6706 MachinePointerInfo(), MachinePointerInfo()); 6707 ByValArgs.push_back(FIPtr); 6708 } 6709 6710 if (!IsTailCall) 6711 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 6712 6713 // Copy argument values to their designated locations. 6714 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 6715 SmallVector<SDValue, 8> MemOpChains; 6716 SDValue StackPtr; 6717 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 6718 CCValAssign &VA = ArgLocs[i]; 6719 SDValue ArgValue = OutVals[i]; 6720 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6721 6722 // Handle passing f64 on RV32D with a soft float ABI as a special case. 
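    // (The value is split with RISCVISD::SplitF64 below and the two i32
    // halves are forwarded to the locations chosen by the calling-convention
    // analysis: a GPR pair, or a GPR plus a stack slot when only a7 remained.)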
6723 bool IsF64OnRV32DSoftABI = 6724 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 6725 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 6726 SDValue SplitF64 = DAG.getNode( 6727 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 6728 SDValue Lo = SplitF64.getValue(0); 6729 SDValue Hi = SplitF64.getValue(1); 6730 6731 Register RegLo = VA.getLocReg(); 6732 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 6733 6734 if (RegLo == RISCV::X17) { 6735 // Second half of f64 is passed on the stack. 6736 // Work out the address of the stack slot. 6737 if (!StackPtr.getNode()) 6738 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 6739 // Emit the store. 6740 MemOpChains.push_back( 6741 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 6742 } else { 6743 // Second half of f64 is passed in another GPR. 6744 assert(RegLo < RISCV::X31 && "Invalid register pair"); 6745 Register RegHigh = RegLo + 1; 6746 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 6747 } 6748 continue; 6749 } 6750 6751 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 6752 // as any other MemLoc. 6753 6754 // Promote the value if needed. 6755 // For now, only handle fully promoted and indirect arguments. 6756 if (VA.getLocInfo() == CCValAssign::Indirect) { 6757 // Store the argument in a stack slot and pass its address. 6758 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 6759 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 6760 MemOpChains.push_back( 6761 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 6762 MachinePointerInfo::getFixedStack(MF, FI))); 6763 // If the original argument was split (e.g. i128), we need 6764 // to store the required parts of it here (and pass just one address). 6765 // Vectors may be partly split to registers and partly to the stack, in 6766 // which case the base address is partly offset and subsequent stores are 6767 // relative to that. 6768 unsigned ArgIndex = Outs[i].OrigArgIndex; 6769 unsigned ArgPartOffset = Outs[i].PartOffset; 6770 assert(VA.getValVT().isVector() || ArgPartOffset == 0); 6771 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 6772 SDValue PartValue = OutVals[i + 1]; 6773 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset; 6774 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 6775 DAG.getIntPtrConstant(PartOffset, DL)); 6776 MemOpChains.push_back( 6777 DAG.getStore(Chain, DL, PartValue, Address, 6778 MachinePointerInfo::getFixedStack(MF, FI))); 6779 ++i; 6780 } 6781 ArgValue = SpillSlot; 6782 } else { 6783 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget); 6784 } 6785 6786 // Use local copy if it is a byval arg. 6787 if (Flags.isByVal()) 6788 ArgValue = ByValArgs[j++]; 6789 6790 if (VA.isRegLoc()) { 6791 // Queue up the argument copies and emit them at the end. 6792 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 6793 } else { 6794 assert(VA.isMemLoc() && "Argument not register or memory"); 6795 assert(!IsTailCall && "Tail call not allowed if stack is used " 6796 "for passing parameters"); 6797 6798 // Work out the address of the stack slot. 6799 if (!StackPtr.getNode()) 6800 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 6801 SDValue Address = 6802 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 6803 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 6804 6805 // Emit the store. 
6806 MemOpChains.push_back( 6807 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 6808 } 6809 } 6810 6811 // Join the stores, which are independent of one another. 6812 if (!MemOpChains.empty()) 6813 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 6814 6815 SDValue Glue; 6816 6817 // Build a sequence of copy-to-reg nodes, chained and glued together. 6818 for (auto &Reg : RegsToPass) { 6819 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 6820 Glue = Chain.getValue(1); 6821 } 6822 6823 // Validate that none of the argument registers have been marked as 6824 // reserved, if so report an error. Do the same for the return address if this 6825 // is not a tailcall. 6826 validateCCReservedRegs(RegsToPass, MF); 6827 if (!IsTailCall && 6828 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 6829 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 6830 MF.getFunction(), 6831 "Return address register required, but has been reserved."}); 6832 6833 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 6834 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 6835 // split it and then direct call can be matched by PseudoCALL. 6836 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 6837 const GlobalValue *GV = S->getGlobal(); 6838 6839 unsigned OpFlags = RISCVII::MO_CALL; 6840 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 6841 OpFlags = RISCVII::MO_PLT; 6842 6843 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 6844 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 6845 unsigned OpFlags = RISCVII::MO_CALL; 6846 6847 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 6848 nullptr)) 6849 OpFlags = RISCVII::MO_PLT; 6850 6851 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 6852 } 6853 6854 // The first call operand is the chain and the second is the target address. 6855 SmallVector<SDValue, 8> Ops; 6856 Ops.push_back(Chain); 6857 Ops.push_back(Callee); 6858 6859 // Add argument registers to the end of the list so that they are 6860 // known live into the call. 6861 for (auto &Reg : RegsToPass) 6862 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 6863 6864 if (!IsTailCall) { 6865 // Add a register mask operand representing the call-preserved registers. 6866 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 6867 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 6868 assert(Mask && "Missing call preserved mask for calling convention"); 6869 Ops.push_back(DAG.getRegisterMask(Mask)); 6870 } 6871 6872 // Glue the call to the argument copies, if any. 6873 if (Glue.getNode()) 6874 Ops.push_back(Glue); 6875 6876 // Emit the call. 6877 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 6878 6879 if (IsTailCall) { 6880 MF.getFrameInfo().setHasTailCall(); 6881 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 6882 } 6883 6884 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 6885 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 6886 Glue = Chain.getValue(1); 6887 6888 // Mark the end of the call, which is glued to the call itself. 6889 Chain = DAG.getCALLSEQ_END(Chain, 6890 DAG.getConstant(NumBytes, DL, PtrVT, true), 6891 DAG.getConstant(0, DL, PtrVT, true), 6892 Glue, DL); 6893 Glue = Chain.getValue(1); 6894 6895 // Assign locations to each value returned by this call. 
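  // This reuses CC_RISCV with IsRet=true, so an f64 returned on RV32 with a
  // soft-float ABI shows up as an i32 location and is reassembled from the
  // two GPRs with BuildPairF64 below.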
6896 SmallVector<CCValAssign, 16> RVLocs; 6897 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 6898 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 6899 6900 // Copy all of the result registers out of their specified physreg. 6901 for (auto &VA : RVLocs) { 6902 // Copy the value out 6903 SDValue RetValue = 6904 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 6905 // Glue the RetValue to the end of the call sequence 6906 Chain = RetValue.getValue(1); 6907 Glue = RetValue.getValue(2); 6908 6909 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 6910 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 6911 SDValue RetValue2 = 6912 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 6913 Chain = RetValue2.getValue(1); 6914 Glue = RetValue2.getValue(2); 6915 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 6916 RetValue2); 6917 } 6918 6919 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget); 6920 6921 InVals.push_back(RetValue); 6922 } 6923 6924 return Chain; 6925 } 6926 6927 bool RISCVTargetLowering::CanLowerReturn( 6928 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 6929 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 6930 SmallVector<CCValAssign, 16> RVLocs; 6931 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 6932 6933 Optional<unsigned> FirstMaskArgument; 6934 if (Subtarget.hasStdExtV()) 6935 FirstMaskArgument = preAssignMask(Outs); 6936 6937 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 6938 MVT VT = Outs[i].VT; 6939 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 6940 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 6941 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 6942 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 6943 *this, FirstMaskArgument)) 6944 return false; 6945 } 6946 return true; 6947 } 6948 6949 SDValue 6950 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 6951 bool IsVarArg, 6952 const SmallVectorImpl<ISD::OutputArg> &Outs, 6953 const SmallVectorImpl<SDValue> &OutVals, 6954 const SDLoc &DL, SelectionDAG &DAG) const { 6955 const MachineFunction &MF = DAG.getMachineFunction(); 6956 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 6957 6958 // Stores the assignment of the return value to a location. 6959 SmallVector<CCValAssign, 16> RVLocs; 6960 6961 // Info about the registers and stack slot. 6962 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 6963 *DAG.getContext()); 6964 6965 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 6966 nullptr); 6967 6968 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 6969 report_fatal_error("GHC functions return void only"); 6970 6971 SDValue Glue; 6972 SmallVector<SDValue, 4> RetOps(1, Chain); 6973 6974 // Copy the result values into the output registers. 6975 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 6976 SDValue Val = OutVals[i]; 6977 CCValAssign &VA = RVLocs[i]; 6978 assert(VA.isRegLoc() && "Can only return in registers!"); 6979 6980 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 6981 // Handle returning f64 on RV32D with a soft float ABI. 
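      // The value is split into two i32 halves returned in a GPR pair: the
      // low half in VA's register, the high half in the next register.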
6982 assert(VA.isRegLoc() && "Expected return via registers"); 6983 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 6984 DAG.getVTList(MVT::i32, MVT::i32), Val); 6985 SDValue Lo = SplitF64.getValue(0); 6986 SDValue Hi = SplitF64.getValue(1); 6987 Register RegLo = VA.getLocReg(); 6988 assert(RegLo < RISCV::X31 && "Invalid register pair"); 6989 Register RegHi = RegLo + 1; 6990 6991 if (STI.isRegisterReservedByUser(RegLo) || 6992 STI.isRegisterReservedByUser(RegHi)) 6993 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 6994 MF.getFunction(), 6995 "Return value register required, but has been reserved."}); 6996 6997 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 6998 Glue = Chain.getValue(1); 6999 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 7000 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 7001 Glue = Chain.getValue(1); 7002 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 7003 } else { 7004 // Handle a 'normal' return. 7005 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget); 7006 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 7007 7008 if (STI.isRegisterReservedByUser(VA.getLocReg())) 7009 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 7010 MF.getFunction(), 7011 "Return value register required, but has been reserved."}); 7012 7013 // Guarantee that all emitted copies are stuck together. 7014 Glue = Chain.getValue(1); 7015 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 7016 } 7017 } 7018 7019 RetOps[0] = Chain; // Update chain. 7020 7021 // Add the glue node if we have it. 7022 if (Glue.getNode()) { 7023 RetOps.push_back(Glue); 7024 } 7025 7026 // Interrupt service routines use different return instructions. 7027 const Function &Func = DAG.getMachineFunction().getFunction(); 7028 if (Func.hasFnAttribute("interrupt")) { 7029 if (!Func.getReturnType()->isVoidTy()) 7030 report_fatal_error( 7031 "Functions with the interrupt attribute must have void return type!"); 7032 7033 MachineFunction &MF = DAG.getMachineFunction(); 7034 StringRef Kind = 7035 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 7036 7037 unsigned RetOpc; 7038 if (Kind == "user") 7039 RetOpc = RISCVISD::URET_FLAG; 7040 else if (Kind == "supervisor") 7041 RetOpc = RISCVISD::SRET_FLAG; 7042 else 7043 RetOpc = RISCVISD::MRET_FLAG; 7044 7045 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 7046 } 7047 7048 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 7049 } 7050 7051 void RISCVTargetLowering::validateCCReservedRegs( 7052 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 7053 MachineFunction &MF) const { 7054 const Function &F = MF.getFunction(); 7055 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 7056 7057 if (llvm::any_of(Regs, [&STI](auto Reg) { 7058 return STI.isRegisterReservedByUser(Reg.first); 7059 })) 7060 F.getContext().diagnose(DiagnosticInfoUnsupported{ 7061 F, "Argument register required, but has been reserved."}); 7062 } 7063 7064 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 7065 return CI->isTailCall(); 7066 } 7067 7068 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 7069 #define NODE_NAME_CASE(NODE) \ 7070 case RISCVISD::NODE: \ 7071 return "RISCVISD::" #NODE; 7072 // clang-format off 7073 switch ((RISCVISD::NodeType)Opcode) { 7074 case RISCVISD::FIRST_NUMBER: 7075 break; 7076 NODE_NAME_CASE(RET_FLAG) 7077 NODE_NAME_CASE(URET_FLAG) 7078 NODE_NAME_CASE(SRET_FLAG) 7079 
  NODE_NAME_CASE(MRET_FLAG)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(SELECT_CC)
  NODE_NAME_CASE(BR_CC)
  NODE_NAME_CASE(BuildPairF64)
  NODE_NAME_CASE(SplitF64)
  NODE_NAME_CASE(TAIL)
  NODE_NAME_CASE(MULHSU)
  NODE_NAME_CASE(SLLW)
  NODE_NAME_CASE(SRAW)
  NODE_NAME_CASE(SRLW)
  NODE_NAME_CASE(DIVW)
  NODE_NAME_CASE(DIVUW)
  NODE_NAME_CASE(REMUW)
  NODE_NAME_CASE(ROLW)
  NODE_NAME_CASE(RORW)
  NODE_NAME_CASE(CLZW)
  NODE_NAME_CASE(CTZW)
  NODE_NAME_CASE(FSLW)
  NODE_NAME_CASE(FSRW)
  NODE_NAME_CASE(FSL)
  NODE_NAME_CASE(FSR)
  NODE_NAME_CASE(FMV_H_X)
  NODE_NAME_CASE(FMV_X_ANYEXTH)
  NODE_NAME_CASE(FMV_W_X_RV64)
  NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
  NODE_NAME_CASE(READ_CYCLE_WIDE)
  NODE_NAME_CASE(GREVI)
  NODE_NAME_CASE(GREVIW)
  NODE_NAME_CASE(GORCI)
  NODE_NAME_CASE(GORCIW)
  NODE_NAME_CASE(SHFLI)
  NODE_NAME_CASE(VMV_V_X_VL)
  NODE_NAME_CASE(VFMV_V_F_VL)
  NODE_NAME_CASE(VMV_X_S)
  NODE_NAME_CASE(VMV_S_X_VL)
  NODE_NAME_CASE(VFMV_S_F_VL)
  NODE_NAME_CASE(SPLAT_VECTOR_I64)
  NODE_NAME_CASE(READ_VLENB)
  NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
  NODE_NAME_CASE(VLEFF)
  NODE_NAME_CASE(VLEFF_MASK)
  NODE_NAME_CASE(VSLIDEUP_VL)
  NODE_NAME_CASE(VSLIDE1UP_VL)
  NODE_NAME_CASE(VSLIDEDOWN_VL)
  NODE_NAME_CASE(VSLIDE1DOWN_VL)
  NODE_NAME_CASE(VID_VL)
  NODE_NAME_CASE(VFNCVT_ROD_VL)
  NODE_NAME_CASE(VECREDUCE_ADD_VL)
  NODE_NAME_CASE(VECREDUCE_UMAX_VL)
  NODE_NAME_CASE(VECREDUCE_SMAX_VL)
  NODE_NAME_CASE(VECREDUCE_UMIN_VL)
  NODE_NAME_CASE(VECREDUCE_SMIN_VL)
  NODE_NAME_CASE(VECREDUCE_AND_VL)
  NODE_NAME_CASE(VECREDUCE_OR_VL)
  NODE_NAME_CASE(VECREDUCE_XOR_VL)
  NODE_NAME_CASE(VECREDUCE_FADD_VL)
  NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
  NODE_NAME_CASE(ADD_VL)
  NODE_NAME_CASE(AND_VL)
  NODE_NAME_CASE(MUL_VL)
  NODE_NAME_CASE(OR_VL)
  NODE_NAME_CASE(SDIV_VL)
  NODE_NAME_CASE(SHL_VL)
  NODE_NAME_CASE(SREM_VL)
  NODE_NAME_CASE(SRA_VL)
  NODE_NAME_CASE(SRL_VL)
  NODE_NAME_CASE(SUB_VL)
  NODE_NAME_CASE(UDIV_VL)
  NODE_NAME_CASE(UREM_VL)
  NODE_NAME_CASE(XOR_VL)
  NODE_NAME_CASE(FADD_VL)
  NODE_NAME_CASE(FSUB_VL)
  NODE_NAME_CASE(FMUL_VL)
  NODE_NAME_CASE(FDIV_VL)
  NODE_NAME_CASE(FNEG_VL)
  NODE_NAME_CASE(FABS_VL)
  NODE_NAME_CASE(FSQRT_VL)
  NODE_NAME_CASE(FMA_VL)
  NODE_NAME_CASE(FCOPYSIGN_VL)
  NODE_NAME_CASE(SMIN_VL)
  NODE_NAME_CASE(SMAX_VL)
  NODE_NAME_CASE(UMIN_VL)
  NODE_NAME_CASE(UMAX_VL)
  NODE_NAME_CASE(MULHS_VL)
  NODE_NAME_CASE(MULHU_VL)
  NODE_NAME_CASE(FP_TO_SINT_VL)
  NODE_NAME_CASE(FP_TO_UINT_VL)
  NODE_NAME_CASE(SINT_TO_FP_VL)
  NODE_NAME_CASE(UINT_TO_FP_VL)
  NODE_NAME_CASE(FP_EXTEND_VL)
  NODE_NAME_CASE(FP_ROUND_VL)
  NODE_NAME_CASE(SETCC_VL)
  NODE_NAME_CASE(VSELECT_VL)
  NODE_NAME_CASE(VMAND_VL)
  NODE_NAME_CASE(VMOR_VL)
  NODE_NAME_CASE(VMXOR_VL)
  NODE_NAME_CASE(VMCLR_VL)
  NODE_NAME_CASE(VMSET_VL)
  NODE_NAME_CASE(VRGATHER_VX_VL)
  NODE_NAME_CASE(VRGATHER_VV_VL)
  NODE_NAME_CASE(VRGATHEREI16_VV_VL)
  NODE_NAME_CASE(VSEXT_VL)
  NODE_NAME_CASE(VZEXT_VL)
  NODE_NAME_CASE(VPOPC_VL)
  NODE_NAME_CASE(VLE_VL)
  NODE_NAME_CASE(VSE_VL)
  NODE_NAME_CASE(READ_CSR)
  NODE_NAME_CASE(WRITE_CSR)
  NODE_NAME_CASE(SWAP_CSR)
  }
  // clang-format on
  return nullptr;
#undef NODE_NAME_CASE
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'f':
    case 'v':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
      return C_Immediate;
    case 'A':
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'f':
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
        return std::make_pair(0U, &RISCV::FPR16RegClass);
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    case 'v':
      for (const auto *RC :
           {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
          return std::make_pair(0U, RC);
      }
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
  unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, plus we want to match those names to the widest floating
  // point register type available, manually select floating point registers
  // here.
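  // For example (illustrative, not from the original comment), a "{fa0}" or
  // "{f10}" constraint should resolve to F10_D when the D extension is
  // available, and to F10_F otherwise.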
  //
  // The second case is the ABI name of the register, so that frontends can
  // also use the ABI names in register constraint lists.
  if (Subtarget.hasStdExtF()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD()) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      return std::make_pair(FReg, &RISCV::FPR32RegClass);
    }
  }

  if (Subtarget.hasStdExtV()) {
    Register VReg = StringSwitch<Register>(Constraint.lower())
                        .Case("{v0}", RISCV::V0)
                        .Case("{v1}", RISCV::V1)
                        .Case("{v2}", RISCV::V2)
                        .Case("{v3}", RISCV::V3)
                        .Case("{v4}", RISCV::V4)
                        .Case("{v5}", RISCV::V5)
                        .Case("{v6}", RISCV::V6)
                        .Case("{v7}", RISCV::V7)
                        .Case("{v8}", RISCV::V8)
                        .Case("{v9}", RISCV::V9)
                        .Case("{v10}", RISCV::V10)
                        .Case("{v11}", RISCV::V11)
                        .Case("{v12}", RISCV::V12)
                        .Case("{v13}", RISCV::V13)
                        .Case("{v14}", RISCV::V14)
                        .Case("{v15}", RISCV::V15)
                        .Case("{v16}", RISCV::V16)
                        .Case("{v17}", RISCV::V17)
                        .Case("{v18}", RISCV::V18)
                        .Case("{v19}", RISCV::V19)
                        .Case("{v20}", RISCV::V20)
                        .Case("{v21}", RISCV::V21)
                        .Case("{v22}", RISCV::V22)
                        .Case("{v23}", RISCV::V23)
                        .Case("{v24}", RISCV::V24)
                        .Case("{v25}", RISCV::V25)
                        .Case("{v26}", RISCV::V26)
                        .Case("{v27}", RISCV::V27)
                        .Case("{v28}", RISCV::V28)
                        .Case("{v29}", RISCV::V29)
                        .Case("{v30}", RISCV::V30)
                        .Case("{v31}", RISCV::V31)
                        .Default(RISCV::NoRegister);
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
            &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);
        }
      }
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
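  // Illustrative IR example (not part of the original source): an instruction
  // such as
  //   %old = atomicrmw fadd float* %p, float %v seq_cst
  // is therefore expanded by AtomicExpand into a cmpxchg loop rather than an
  // LR/SC sequence.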
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
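  // Worked example (illustrative): an i8 atomicrmw min on RV32 has XLen = 32
  // and ValWidth = 8. If the byte sits at bit offset 8 within its aligned
  // word, ShiftAmt = 8 and SextShamt = 32 - 8 - 8 = 16, i.e. the loaded byte
  // is shifted left and then arithmetic-shifted right by 16 so the signed
  // comparison sees a properly sign-extended value.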
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress the unnecessary extensions if the LibCall
  // arguments or return value is f32 type for LP64 ABI.
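  // Illustrative example (not from the original comment): when compiling for
  // lp64, an f32 value passed to or returned from a runtime library call is
  // left as f32 rather than being extended, since the check below returns
  // false for exactly that ABI/type combination.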
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL to a SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  if (!VT.isFixedLengthVector())
    return false;

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (VT.getVectorElementType().SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
      return false;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    break;
  case MVT::f16:
    if (!Subtarget.hasStdExtZfh())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasStdExtF())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasStdExtD())
      return false;
    break;
  }

  unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
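  // E.g. (illustrative): a type such as v3i32 is rejected here even when
  // v4i32 would be handled, because only power-of-two element counts are
  // accepted for now.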
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isScalableVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
    // and cast to f32.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types are different, bitcast to the same element type
      // of PartVT first.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of element should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
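    // (Illustrative note, not from the original source: this undoes the
    // NaN-boxing performed in splitValueIntoRegisterParts above, which padded
    // the upper 16 bits with ones before handing the value over as an f32.)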
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert it to the same element
      // type of PartVT.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of element should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm