1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interfaces that RISCV uses to lower LLVM code into a 10 // selection DAG. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "RISCVISelLowering.h" 15 #include "MCTargetDesc/RISCVMatInt.h" 16 #include "RISCV.h" 17 #include "RISCVMachineFunctionInfo.h" 18 #include "RISCVRegisterInfo.h" 19 #include "RISCVSubtarget.h" 20 #include "RISCVTargetMachine.h" 21 #include "llvm/ADT/SmallSet.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/CodeGen/MachineFrameInfo.h" 24 #include "llvm/CodeGen/MachineFunction.h" 25 #include "llvm/CodeGen/MachineInstrBuilder.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 28 #include "llvm/CodeGen/ValueTypes.h" 29 #include "llvm/IR/DiagnosticInfo.h" 30 #include "llvm/IR/DiagnosticPrinter.h" 31 #include "llvm/IR/IntrinsicsRISCV.h" 32 #include "llvm/IR/IRBuilder.h" 33 #include "llvm/Support/Debug.h" 34 #include "llvm/Support/ErrorHandling.h" 35 #include "llvm/Support/KnownBits.h" 36 #include "llvm/Support/MathExtras.h" 37 #include "llvm/Support/raw_ostream.h" 38 39 using namespace llvm; 40 41 #define DEBUG_TYPE "riscv-lower" 42 43 STATISTIC(NumTailCalls, "Number of tail calls"); 44 45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, 46 const RISCVSubtarget &STI) 47 : TargetLowering(TM), Subtarget(STI) { 48 49 if (Subtarget.isRV32E()) 50 report_fatal_error("Codegen not yet implemented for RV32E"); 51 52 RISCVABI::ABI ABI = Subtarget.getTargetABI(); 53 assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI"); 54 55 if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) && 56 !Subtarget.hasStdExtF()) { 57 errs() << "Hard-float 'f' ABI can't be used for a target that " 58 "doesn't support the F instruction set extension (ignoring " 59 "target-abi)\n"; 60 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; 61 } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) && 62 !Subtarget.hasStdExtD()) { 63 errs() << "Hard-float 'd' ABI can't be used for a target that " 64 "doesn't support the D instruction set extension (ignoring " 65 "target-abi)\n"; 66 ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32; 67 } 68 69 switch (ABI) { 70 default: 71 report_fatal_error("Don't know how to lower this ABI"); 72 case RISCVABI::ABI_ILP32: 73 case RISCVABI::ABI_ILP32F: 74 case RISCVABI::ABI_ILP32D: 75 case RISCVABI::ABI_LP64: 76 case RISCVABI::ABI_LP64F: 77 case RISCVABI::ABI_LP64D: 78 break; 79 } 80 81 MVT XLenVT = Subtarget.getXLenVT(); 82 83 // Set up the register classes. 
84 addRegisterClass(XLenVT, &RISCV::GPRRegClass); 85 86 if (Subtarget.hasStdExtZfh()) 87 addRegisterClass(MVT::f16, &RISCV::FPR16RegClass); 88 if (Subtarget.hasStdExtF()) 89 addRegisterClass(MVT::f32, &RISCV::FPR32RegClass); 90 if (Subtarget.hasStdExtD()) 91 addRegisterClass(MVT::f64, &RISCV::FPR64RegClass); 92 93 static const MVT::SimpleValueType BoolVecVTs[] = { 94 MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1, 95 MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1}; 96 static const MVT::SimpleValueType IntVecVTs[] = { 97 MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8, 98 MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16, 99 MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32, 100 MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64, 101 MVT::nxv4i64, MVT::nxv8i64}; 102 static const MVT::SimpleValueType F16VecVTs[] = { 103 MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16, 104 MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16}; 105 static const MVT::SimpleValueType F32VecVTs[] = { 106 MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32}; 107 static const MVT::SimpleValueType F64VecVTs[] = { 108 MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64}; 109 110 if (Subtarget.hasStdExtV()) { 111 auto addRegClassForRVV = [this](MVT VT) { 112 unsigned Size = VT.getSizeInBits().getKnownMinValue(); 113 assert(Size <= 512 && isPowerOf2_32(Size)); 114 const TargetRegisterClass *RC; 115 if (Size <= 64) 116 RC = &RISCV::VRRegClass; 117 else if (Size == 128) 118 RC = &RISCV::VRM2RegClass; 119 else if (Size == 256) 120 RC = &RISCV::VRM4RegClass; 121 else 122 RC = &RISCV::VRM8RegClass; 123 124 addRegisterClass(VT, RC); 125 }; 126 127 for (MVT VT : BoolVecVTs) 128 addRegClassForRVV(VT); 129 for (MVT VT : IntVecVTs) 130 addRegClassForRVV(VT); 131 132 if (Subtarget.hasStdExtZfh()) 133 for (MVT VT : F16VecVTs) 134 addRegClassForRVV(VT); 135 136 if (Subtarget.hasStdExtF()) 137 for (MVT VT : F32VecVTs) 138 addRegClassForRVV(VT); 139 140 if (Subtarget.hasStdExtD()) 141 for (MVT VT : F64VecVTs) 142 addRegClassForRVV(VT); 143 144 if (Subtarget.useRVVForFixedLengthVectors()) { 145 auto addRegClassForFixedVectors = [this](MVT VT) { 146 MVT ContainerVT = getContainerForFixedLengthVector(VT); 147 unsigned RCID = getRegClassIDForVecVT(ContainerVT); 148 const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo(); 149 addRegisterClass(VT, TRI.getRegClass(RCID)); 150 }; 151 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) 152 if (useRVVForFixedLengthVectorVT(VT)) 153 addRegClassForFixedVectors(VT); 154 155 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) 156 if (useRVVForFixedLengthVectorVT(VT)) 157 addRegClassForFixedVectors(VT); 158 } 159 } 160 161 // Compute derived properties from the register classes. 162 computeRegisterProperties(STI.getRegisterInfo()); 163 164 setStackPointerRegisterToSaveRestore(RISCV::X2); 165 166 for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) 167 setLoadExtAction(N, XLenVT, MVT::i1, Promote); 168 169 // TODO: add all necessary setOperationAction calls. 
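  // Illustrative note (not from the original source) for the RVV register
  // class selection above: nxv1i32 has a known minimum size of 32 bits and
  // lands in VR, nxv4i32 (128 bits) in VRM2, and nxv16i32 (512 bits) in VRM8.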
170 setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand); 171 172 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 173 setOperationAction(ISD::BR_CC, XLenVT, Expand); 174 setOperationAction(ISD::BRCOND, MVT::Other, Custom); 175 setOperationAction(ISD::SELECT_CC, XLenVT, Expand); 176 177 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); 178 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); 179 180 setOperationAction(ISD::VASTART, MVT::Other, Custom); 181 setOperationAction(ISD::VAARG, MVT::Other, Expand); 182 setOperationAction(ISD::VACOPY, MVT::Other, Expand); 183 setOperationAction(ISD::VAEND, MVT::Other, Expand); 184 185 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 186 if (!Subtarget.hasStdExtZbb()) { 187 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); 188 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); 189 } 190 191 if (Subtarget.is64Bit()) { 192 setOperationAction(ISD::ADD, MVT::i32, Custom); 193 setOperationAction(ISD::SUB, MVT::i32, Custom); 194 setOperationAction(ISD::SHL, MVT::i32, Custom); 195 setOperationAction(ISD::SRA, MVT::i32, Custom); 196 setOperationAction(ISD::SRL, MVT::i32, Custom); 197 198 setOperationAction(ISD::UADDO, MVT::i32, Custom); 199 setOperationAction(ISD::USUBO, MVT::i32, Custom); 200 setOperationAction(ISD::UADDSAT, MVT::i32, Custom); 201 setOperationAction(ISD::USUBSAT, MVT::i32, Custom); 202 } 203 204 if (!Subtarget.hasStdExtM()) { 205 setOperationAction(ISD::MUL, XLenVT, Expand); 206 setOperationAction(ISD::MULHS, XLenVT, Expand); 207 setOperationAction(ISD::MULHU, XLenVT, Expand); 208 setOperationAction(ISD::SDIV, XLenVT, Expand); 209 setOperationAction(ISD::UDIV, XLenVT, Expand); 210 setOperationAction(ISD::SREM, XLenVT, Expand); 211 setOperationAction(ISD::UREM, XLenVT, Expand); 212 } else { 213 if (Subtarget.is64Bit()) { 214 setOperationAction(ISD::MUL, MVT::i32, Custom); 215 setOperationAction(ISD::MUL, MVT::i128, Custom); 216 217 setOperationAction(ISD::SDIV, MVT::i8, Custom); 218 setOperationAction(ISD::UDIV, MVT::i8, Custom); 219 setOperationAction(ISD::UREM, MVT::i8, Custom); 220 setOperationAction(ISD::SDIV, MVT::i16, Custom); 221 setOperationAction(ISD::UDIV, MVT::i16, Custom); 222 setOperationAction(ISD::UREM, MVT::i16, Custom); 223 setOperationAction(ISD::SDIV, MVT::i32, Custom); 224 setOperationAction(ISD::UDIV, MVT::i32, Custom); 225 setOperationAction(ISD::UREM, MVT::i32, Custom); 226 } else { 227 setOperationAction(ISD::MUL, MVT::i64, Custom); 228 } 229 } 230 231 setOperationAction(ISD::SDIVREM, XLenVT, Expand); 232 setOperationAction(ISD::UDIVREM, XLenVT, Expand); 233 setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand); 234 setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand); 235 236 setOperationAction(ISD::SHL_PARTS, XLenVT, Custom); 237 setOperationAction(ISD::SRL_PARTS, XLenVT, Custom); 238 setOperationAction(ISD::SRA_PARTS, XLenVT, Custom); 239 240 if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { 241 if (Subtarget.is64Bit()) { 242 setOperationAction(ISD::ROTL, MVT::i32, Custom); 243 setOperationAction(ISD::ROTR, MVT::i32, Custom); 244 } 245 } else { 246 setOperationAction(ISD::ROTL, XLenVT, Expand); 247 setOperationAction(ISD::ROTR, XLenVT, Expand); 248 } 249 250 if (Subtarget.hasStdExtZbp()) { 251 // Custom lower bswap/bitreverse so we can convert them to GREVI to enable 252 // more combining. 
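    // For instance (illustrative): on RV64, bswap corresponds to GREVI with
    // shamt 56 and bitreverse to GREVI with shamt 63, so adjacent GREVI-style
    // operations can later be folded together by the combines.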
253 setOperationAction(ISD::BITREVERSE, XLenVT, Custom); 254 setOperationAction(ISD::BSWAP, XLenVT, Custom); 255 setOperationAction(ISD::BITREVERSE, MVT::i8, Custom); 256 // BSWAP i8 doesn't exist. 257 setOperationAction(ISD::BITREVERSE, MVT::i16, Custom); 258 setOperationAction(ISD::BSWAP, MVT::i16, Custom); 259 260 if (Subtarget.is64Bit()) { 261 setOperationAction(ISD::BITREVERSE, MVT::i32, Custom); 262 setOperationAction(ISD::BSWAP, MVT::i32, Custom); 263 } 264 } else { 265 // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll 266 // pattern match it directly in isel. 267 setOperationAction(ISD::BSWAP, XLenVT, 268 Subtarget.hasStdExtZbb() ? Legal : Expand); 269 } 270 271 if (Subtarget.hasStdExtZbb()) { 272 setOperationAction(ISD::SMIN, XLenVT, Legal); 273 setOperationAction(ISD::SMAX, XLenVT, Legal); 274 setOperationAction(ISD::UMIN, XLenVT, Legal); 275 setOperationAction(ISD::UMAX, XLenVT, Legal); 276 277 if (Subtarget.is64Bit()) { 278 setOperationAction(ISD::CTTZ, MVT::i32, Custom); 279 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom); 280 setOperationAction(ISD::CTLZ, MVT::i32, Custom); 281 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); 282 } 283 } else { 284 setOperationAction(ISD::CTTZ, XLenVT, Expand); 285 setOperationAction(ISD::CTLZ, XLenVT, Expand); 286 setOperationAction(ISD::CTPOP, XLenVT, Expand); 287 } 288 289 if (Subtarget.hasStdExtZbt()) { 290 setOperationAction(ISD::FSHL, XLenVT, Custom); 291 setOperationAction(ISD::FSHR, XLenVT, Custom); 292 setOperationAction(ISD::SELECT, XLenVT, Legal); 293 294 if (Subtarget.is64Bit()) { 295 setOperationAction(ISD::FSHL, MVT::i32, Custom); 296 setOperationAction(ISD::FSHR, MVT::i32, Custom); 297 } 298 } else { 299 setOperationAction(ISD::SELECT, XLenVT, Custom); 300 } 301 302 ISD::CondCode FPCCToExpand[] = { 303 ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, 304 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT, 305 ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO}; 306 307 ISD::NodeType FPOpToExpand[] = { 308 ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP, 309 ISD::FP_TO_FP16}; 310 311 if (Subtarget.hasStdExtZfh()) 312 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 313 314 if (Subtarget.hasStdExtZfh()) { 315 setOperationAction(ISD::FMINNUM, MVT::f16, Legal); 316 setOperationAction(ISD::FMAXNUM, MVT::f16, Legal); 317 setOperationAction(ISD::LRINT, MVT::f16, Legal); 318 setOperationAction(ISD::LLRINT, MVT::f16, Legal); 319 setOperationAction(ISD::LROUND, MVT::f16, Legal); 320 setOperationAction(ISD::LLROUND, MVT::f16, Legal); 321 for (auto CC : FPCCToExpand) 322 setCondCodeAction(CC, MVT::f16, Expand); 323 setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); 324 setOperationAction(ISD::SELECT, MVT::f16, Custom); 325 setOperationAction(ISD::BR_CC, MVT::f16, Expand); 326 for (auto Op : FPOpToExpand) 327 setOperationAction(Op, MVT::f16, Expand); 328 } 329 330 if (Subtarget.hasStdExtF()) { 331 setOperationAction(ISD::FMINNUM, MVT::f32, Legal); 332 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal); 333 setOperationAction(ISD::LRINT, MVT::f32, Legal); 334 setOperationAction(ISD::LLRINT, MVT::f32, Legal); 335 setOperationAction(ISD::LROUND, MVT::f32, Legal); 336 setOperationAction(ISD::LLROUND, MVT::f32, Legal); 337 for (auto CC : FPCCToExpand) 338 setCondCodeAction(CC, MVT::f32, Expand); 339 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); 340 setOperationAction(ISD::SELECT, MVT::f32, Custom); 341 setOperationAction(ISD::BR_CC, MVT::f32, 
Expand); 342 for (auto Op : FPOpToExpand) 343 setOperationAction(Op, MVT::f32, Expand); 344 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); 345 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 346 } 347 348 if (Subtarget.hasStdExtF() && Subtarget.is64Bit()) 349 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 350 351 if (Subtarget.hasStdExtD()) { 352 setOperationAction(ISD::FMINNUM, MVT::f64, Legal); 353 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal); 354 setOperationAction(ISD::LRINT, MVT::f64, Legal); 355 setOperationAction(ISD::LLRINT, MVT::f64, Legal); 356 setOperationAction(ISD::LROUND, MVT::f64, Legal); 357 setOperationAction(ISD::LLROUND, MVT::f64, Legal); 358 for (auto CC : FPCCToExpand) 359 setCondCodeAction(CC, MVT::f64, Expand); 360 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); 361 setOperationAction(ISD::SELECT, MVT::f64, Custom); 362 setOperationAction(ISD::BR_CC, MVT::f64, Expand); 363 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand); 364 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 365 for (auto Op : FPOpToExpand) 366 setOperationAction(Op, MVT::f64, Expand); 367 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); 368 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 369 } 370 371 if (Subtarget.is64Bit()) { 372 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 373 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 374 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom); 375 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom); 376 } 377 378 if (Subtarget.hasStdExtF()) { 379 setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom); 380 setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom); 381 } 382 383 setOperationAction(ISD::GlobalAddress, XLenVT, Custom); 384 setOperationAction(ISD::BlockAddress, XLenVT, Custom); 385 setOperationAction(ISD::ConstantPool, XLenVT, Custom); 386 setOperationAction(ISD::JumpTable, XLenVT, Custom); 387 388 setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom); 389 390 // TODO: On M-mode only targets, the cycle[h] CSR may not be present. 391 // Unfortunately this can't be determined just from the ISA naming string. 392 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, 393 Subtarget.is64Bit() ? Legal : Custom); 394 395 setOperationAction(ISD::TRAP, MVT::Other, Legal); 396 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); 397 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 398 if (Subtarget.is64Bit()) 399 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom); 400 401 if (Subtarget.hasStdExtA()) { 402 setMaxAtomicSizeInBitsSupported(Subtarget.getXLen()); 403 setMinCmpXchgSizeInBits(32); 404 } else { 405 setMaxAtomicSizeInBitsSupported(0); 406 } 407 408 setBooleanContents(ZeroOrOneBooleanContent); 409 410 if (Subtarget.hasStdExtV()) { 411 setBooleanVectorContents(ZeroOrOneBooleanContent); 412 413 setOperationAction(ISD::VSCALE, XLenVT, Custom); 414 415 // RVV intrinsics may have illegal operands. 416 // We also need to custom legalize vmv.x.s. 
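    // For example (illustrative), the vmv.x.s intrinsic with SEW=8 produces an
    // i8 scalar result, which is not a legal scalar type here; marking the
    // small integer types Custom below lets the result be promoted while the
    // vector operand is kept intact.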
417 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom); 418 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom); 419 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom); 420 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom); 421 if (Subtarget.is64Bit()) { 422 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom); 423 } else { 424 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); 425 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); 426 } 427 428 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); 429 430 static unsigned IntegerVPOps[] = { 431 ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV, 432 ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, 433 ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL}; 434 435 static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB, 436 ISD::VP_FMUL, ISD::VP_FDIV}; 437 438 if (!Subtarget.is64Bit()) { 439 // We must custom-lower certain vXi64 operations on RV32 due to the vector 440 // element type being illegal. 441 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom); 442 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom); 443 444 setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom); 445 setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom); 446 setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom); 447 setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom); 448 setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom); 449 setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom); 450 setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom); 451 setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom); 452 } 453 454 for (MVT VT : BoolVecVTs) { 455 setOperationAction(ISD::SPLAT_VECTOR, VT, Custom); 456 457 // Mask VTs are custom-expanded into a series of standard nodes 458 setOperationAction(ISD::TRUNCATE, VT, Custom); 459 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 460 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 461 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 462 463 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 464 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 465 466 setOperationAction(ISD::SELECT, VT, Custom); 467 setOperationAction(ISD::SELECT_CC, VT, Expand); 468 setOperationAction(ISD::VSELECT, VT, Expand); 469 470 setOperationAction(ISD::VECREDUCE_AND, VT, Custom); 471 setOperationAction(ISD::VECREDUCE_OR, VT, Custom); 472 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); 473 474 // RVV has native int->float & float->int conversions where the 475 // element type sizes are within one power-of-two of each other. Any 476 // wider distances between type sizes have to be lowered as sequences 477 // which progressively narrow the gap in stages. 478 setOperationAction(ISD::SINT_TO_FP, VT, Custom); 479 setOperationAction(ISD::UINT_TO_FP, VT, Custom); 480 setOperationAction(ISD::FP_TO_SINT, VT, Custom); 481 setOperationAction(ISD::FP_TO_UINT, VT, Custom); 482 483 // Expand all extending loads to types larger than this, and truncating 484 // stores from types larger than this. 
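      // e.g. (illustrative): a sign-extending load from nxv4i1 to nxv4i32, or
      // a truncating store of nxv4i32 down to nxv4i1, is marked Expand by the
      // loop below instead of being treated as one combined memory operation.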
485 for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { 486 setTruncStoreAction(OtherVT, VT, Expand); 487 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 488 setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); 489 setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); 490 } 491 } 492 493 for (MVT VT : IntVecVTs) { 494 setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); 495 setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom); 496 497 setOperationAction(ISD::SMIN, VT, Legal); 498 setOperationAction(ISD::SMAX, VT, Legal); 499 setOperationAction(ISD::UMIN, VT, Legal); 500 setOperationAction(ISD::UMAX, VT, Legal); 501 502 setOperationAction(ISD::ROTL, VT, Expand); 503 setOperationAction(ISD::ROTR, VT, Expand); 504 505 // Custom-lower extensions and truncations from/to mask types. 506 setOperationAction(ISD::ANY_EXTEND, VT, Custom); 507 setOperationAction(ISD::SIGN_EXTEND, VT, Custom); 508 setOperationAction(ISD::ZERO_EXTEND, VT, Custom); 509 510 // RVV has native int->float & float->int conversions where the 511 // element type sizes are within one power-of-two of each other. Any 512 // wider distances between type sizes have to be lowered as sequences 513 // which progressively narrow the gap in stages. 514 setOperationAction(ISD::SINT_TO_FP, VT, Custom); 515 setOperationAction(ISD::UINT_TO_FP, VT, Custom); 516 setOperationAction(ISD::FP_TO_SINT, VT, Custom); 517 setOperationAction(ISD::FP_TO_UINT, VT, Custom); 518 519 setOperationAction(ISD::SADDSAT, VT, Legal); 520 setOperationAction(ISD::UADDSAT, VT, Legal); 521 setOperationAction(ISD::SSUBSAT, VT, Legal); 522 setOperationAction(ISD::USUBSAT, VT, Legal); 523 524 // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL" 525 // nodes which truncate by one power of two at a time. 526 setOperationAction(ISD::TRUNCATE, VT, Custom); 527 528 // Custom-lower insert/extract operations to simplify patterns. 529 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 530 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 531 532 // Custom-lower reduction operations to set up the corresponding custom 533 // nodes' operands. 
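      // For example (illustrative), @llvm.vector.reduce.add.nxv4i32 is custom
      // lowered later so the operands of the corresponding target-specific
      // reduction node can be set up explicitly.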
534 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); 535 setOperationAction(ISD::VECREDUCE_AND, VT, Custom); 536 setOperationAction(ISD::VECREDUCE_OR, VT, Custom); 537 setOperationAction(ISD::VECREDUCE_XOR, VT, Custom); 538 setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); 539 setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); 540 setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); 541 setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); 542 543 for (unsigned VPOpc : IntegerVPOps) 544 setOperationAction(VPOpc, VT, Custom); 545 546 setOperationAction(ISD::LOAD, VT, Custom); 547 setOperationAction(ISD::STORE, VT, Custom); 548 549 setOperationAction(ISD::MLOAD, VT, Custom); 550 setOperationAction(ISD::MSTORE, VT, Custom); 551 setOperationAction(ISD::MGATHER, VT, Custom); 552 setOperationAction(ISD::MSCATTER, VT, Custom); 553 554 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 555 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 556 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 557 558 setOperationAction(ISD::SELECT, VT, Custom); 559 setOperationAction(ISD::SELECT_CC, VT, Expand); 560 561 setOperationAction(ISD::STEP_VECTOR, VT, Custom); 562 setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); 563 564 for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) { 565 setTruncStoreAction(VT, OtherVT, Expand); 566 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 567 setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); 568 setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); 569 } 570 } 571 572 // Expand various CCs to best match the RVV ISA, which natively supports UNE 573 // but no other unordered comparisons, and supports all ordered comparisons 574 // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization 575 // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE), 576 // and we pattern-match those back to the "original", swapping operands once 577 // more. This way we catch both operations and both "vf" and "fv" forms with 578 // fewer patterns. 579 ISD::CondCode VFPCCToExpand[] = { 580 ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT, 581 ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO, 582 ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE, 583 }; 584 585 // Sets common operation actions on RVV floating-point vector types. 586 const auto SetCommonVFPActions = [&](MVT VT) { 587 setOperationAction(ISD::SPLAT_VECTOR, VT, Legal); 588 // RVV has native FP_ROUND & FP_EXTEND conversions where the element type 589 // sizes are within one power-of-two of each other. Therefore conversions 590 // between vXf16 and vXf64 must be lowered as sequences which convert via 591 // vXf32. 592 setOperationAction(ISD::FP_ROUND, VT, Custom); 593 setOperationAction(ISD::FP_EXTEND, VT, Custom); 594 // Custom-lower insert/extract operations to simplify patterns. 595 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 596 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 597 // Expand various condition codes (explained above). 
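      // For instance (illustrative): (setogt x, y) has no direct RVV compare,
      // so it is expanded here and later matched as (setolt y, x) with the
      // operands swapped, mapping onto vmflt.vv / vmflt.vf.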
598 for (auto CC : VFPCCToExpand) 599 setCondCodeAction(CC, VT, Expand); 600 601 setOperationAction(ISD::FMINNUM, VT, Legal); 602 setOperationAction(ISD::FMAXNUM, VT, Legal); 603 604 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); 605 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); 606 setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); 607 setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); 608 setOperationAction(ISD::FCOPYSIGN, VT, Legal); 609 610 setOperationAction(ISD::LOAD, VT, Custom); 611 setOperationAction(ISD::STORE, VT, Custom); 612 613 setOperationAction(ISD::MLOAD, VT, Custom); 614 setOperationAction(ISD::MSTORE, VT, Custom); 615 setOperationAction(ISD::MGATHER, VT, Custom); 616 setOperationAction(ISD::MSCATTER, VT, Custom); 617 618 setOperationAction(ISD::SELECT, VT, Custom); 619 setOperationAction(ISD::SELECT_CC, VT, Expand); 620 621 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); 622 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 623 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 624 625 setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); 626 627 for (unsigned VPOpc : FloatingPointVPOps) 628 setOperationAction(VPOpc, VT, Custom); 629 }; 630 631 // Sets common extload/truncstore actions on RVV floating-point vector 632 // types. 633 const auto SetCommonVFPExtLoadTruncStoreActions = 634 [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) { 635 for (auto SmallVT : SmallerVTs) { 636 setTruncStoreAction(VT, SmallVT, Expand); 637 setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand); 638 } 639 }; 640 641 if (Subtarget.hasStdExtZfh()) 642 for (MVT VT : F16VecVTs) 643 SetCommonVFPActions(VT); 644 645 for (MVT VT : F32VecVTs) { 646 if (Subtarget.hasStdExtF()) 647 SetCommonVFPActions(VT); 648 SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); 649 } 650 651 for (MVT VT : F64VecVTs) { 652 if (Subtarget.hasStdExtD()) 653 SetCommonVFPActions(VT); 654 SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs); 655 SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs); 656 } 657 658 if (Subtarget.useRVVForFixedLengthVectors()) { 659 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) { 660 if (!useRVVForFixedLengthVectorVT(VT)) 661 continue; 662 663 // By default everything must be expanded. 664 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) 665 setOperationAction(Op, VT, Expand); 666 for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) { 667 setTruncStoreAction(VT, OtherVT, Expand); 668 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 669 setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand); 670 setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand); 671 } 672 673 // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. 
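        // Sketch of the idea (illustrative): a fixed-length v4i32 value is
        // first inserted into an undef scalable container (e.g. nxv2i32) at
        // index 0, operated on there, and extracted back out at index 0; see
        // convertToScalableVector / convertFromScalableVector further down.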
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        // Operations below are different between masks and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        // Use SPLAT_VECTOR to prevent type legalization from destroying the
        // splats when type legalizing i64 scalar on RV32.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SADDSAT, VT, Custom);
        setOperationAction(ISD::UADDSAT, VT, Custom);
        setOperationAction(ISD::SSUBSAT, VT, Custom);
        setOperationAction(ISD::USUBSAT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        // Custom-lower reduction operations to
set up the corresponding custom 763 // nodes' operands. 764 setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); 765 setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); 766 setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); 767 setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); 768 setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); 769 770 for (unsigned VPOpc : IntegerVPOps) 771 setOperationAction(VPOpc, VT, Custom); 772 } 773 774 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) { 775 if (!useRVVForFixedLengthVectorVT(VT)) 776 continue; 777 778 // By default everything must be expanded. 779 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) 780 setOperationAction(Op, VT, Expand); 781 for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) { 782 setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand); 783 setTruncStoreAction(VT, OtherVT, Expand); 784 } 785 786 // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. 787 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom); 788 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); 789 790 setOperationAction(ISD::BUILD_VECTOR, VT, Custom); 791 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); 792 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); 793 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); 794 795 setOperationAction(ISD::LOAD, VT, Custom); 796 setOperationAction(ISD::STORE, VT, Custom); 797 setOperationAction(ISD::MLOAD, VT, Custom); 798 setOperationAction(ISD::MSTORE, VT, Custom); 799 setOperationAction(ISD::MGATHER, VT, Custom); 800 setOperationAction(ISD::MSCATTER, VT, Custom); 801 setOperationAction(ISD::FADD, VT, Custom); 802 setOperationAction(ISD::FSUB, VT, Custom); 803 setOperationAction(ISD::FMUL, VT, Custom); 804 setOperationAction(ISD::FDIV, VT, Custom); 805 setOperationAction(ISD::FNEG, VT, Custom); 806 setOperationAction(ISD::FABS, VT, Custom); 807 setOperationAction(ISD::FCOPYSIGN, VT, Custom); 808 setOperationAction(ISD::FSQRT, VT, Custom); 809 setOperationAction(ISD::FMA, VT, Custom); 810 setOperationAction(ISD::FMINNUM, VT, Custom); 811 setOperationAction(ISD::FMAXNUM, VT, Custom); 812 813 setOperationAction(ISD::FP_ROUND, VT, Custom); 814 setOperationAction(ISD::FP_EXTEND, VT, Custom); 815 816 for (auto CC : VFPCCToExpand) 817 setCondCodeAction(CC, VT, Expand); 818 819 setOperationAction(ISD::VSELECT, VT, Custom); 820 setOperationAction(ISD::SELECT, VT, Custom); 821 setOperationAction(ISD::SELECT_CC, VT, Expand); 822 823 setOperationAction(ISD::BITCAST, VT, Custom); 824 825 setOperationAction(ISD::VECREDUCE_FADD, VT, Custom); 826 setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); 827 setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); 828 setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); 829 830 for (unsigned VPOpc : FloatingPointVPOps) 831 setOperationAction(VPOpc, VT, Custom); 832 } 833 834 // Custom-legalize bitcasts from fixed-length vectors to scalar types. 835 setOperationAction(ISD::BITCAST, MVT::i8, Custom); 836 setOperationAction(ISD::BITCAST, MVT::i16, Custom); 837 setOperationAction(ISD::BITCAST, MVT::i32, Custom); 838 setOperationAction(ISD::BITCAST, MVT::i64, Custom); 839 setOperationAction(ISD::BITCAST, MVT::f16, Custom); 840 setOperationAction(ISD::BITCAST, MVT::f32, Custom); 841 setOperationAction(ISD::BITCAST, MVT::f64, Custom); 842 } 843 } 844 845 // Function alignments. 846 const Align FunctionAlignment(Subtarget.hasStdExtC() ? 
2 : 4); 847 setMinFunctionAlignment(FunctionAlignment); 848 setPrefFunctionAlignment(FunctionAlignment); 849 850 setMinimumJumpTableEntries(5); 851 852 // Jumps are expensive, compared to logic 853 setJumpIsExpensive(); 854 855 // We can use any register for comparisons 856 setHasMultipleConditionRegisters(); 857 858 setTargetDAGCombine(ISD::AND); 859 setTargetDAGCombine(ISD::OR); 860 setTargetDAGCombine(ISD::XOR); 861 setTargetDAGCombine(ISD::ANY_EXTEND); 862 setTargetDAGCombine(ISD::ZERO_EXTEND); 863 if (Subtarget.hasStdExtV()) { 864 setTargetDAGCombine(ISD::FCOPYSIGN); 865 setTargetDAGCombine(ISD::MGATHER); 866 setTargetDAGCombine(ISD::MSCATTER); 867 setTargetDAGCombine(ISD::SRA); 868 setTargetDAGCombine(ISD::SRL); 869 setTargetDAGCombine(ISD::SHL); 870 } 871 } 872 873 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, 874 LLVMContext &Context, 875 EVT VT) const { 876 if (!VT.isVector()) 877 return getPointerTy(DL); 878 if (Subtarget.hasStdExtV() && 879 (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors())) 880 return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount()); 881 return VT.changeVectorElementTypeToInteger(); 882 } 883 884 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const { 885 return Subtarget.getXLenVT(); 886 } 887 888 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 889 const CallInst &I, 890 MachineFunction &MF, 891 unsigned Intrinsic) const { 892 switch (Intrinsic) { 893 default: 894 return false; 895 case Intrinsic::riscv_masked_atomicrmw_xchg_i32: 896 case Intrinsic::riscv_masked_atomicrmw_add_i32: 897 case Intrinsic::riscv_masked_atomicrmw_sub_i32: 898 case Intrinsic::riscv_masked_atomicrmw_nand_i32: 899 case Intrinsic::riscv_masked_atomicrmw_max_i32: 900 case Intrinsic::riscv_masked_atomicrmw_min_i32: 901 case Intrinsic::riscv_masked_atomicrmw_umax_i32: 902 case Intrinsic::riscv_masked_atomicrmw_umin_i32: 903 case Intrinsic::riscv_masked_cmpxchg_i32: { 904 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 905 Info.opc = ISD::INTRINSIC_W_CHAIN; 906 Info.memVT = MVT::getVT(PtrTy->getElementType()); 907 Info.ptrVal = I.getArgOperand(0); 908 Info.offset = 0; 909 Info.align = Align(4); 910 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore | 911 MachineMemOperand::MOVolatile; 912 return true; 913 } 914 } 915 } 916 917 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL, 918 const AddrMode &AM, Type *Ty, 919 unsigned AS, 920 Instruction *I) const { 921 // No global is ever allowed as a base. 922 if (AM.BaseGV) 923 return false; 924 925 // Require a 12-bit signed offset. 926 if (!isInt<12>(AM.BaseOffs)) 927 return false; 928 929 switch (AM.Scale) { 930 case 0: // "r+i" or just "i", depending on HasBaseReg. 931 break; 932 case 1: 933 if (!AM.HasBaseReg) // allow "r+i". 934 break; 935 return false; // disallow "r+r" or "r+r+i". 936 default: 937 return false; 938 } 939 940 return true; 941 } 942 943 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 944 return isInt<12>(Imm); 945 } 946 947 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const { 948 return isInt<12>(Imm); 949 } 950 951 // On RV32, 64-bit integers are split into their high and low parts and held 952 // in two different registers, so the trunc is free since the low register can 953 // just be used. 
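// For example (illustrative), IR such as "%lo = trunc i64 %x to i32" compiled
// for RV32 needs no instruction at all: the register that already holds the
// low half of %x is reused directly.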
954 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { 955 if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy()) 956 return false; 957 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); 958 unsigned DestBits = DstTy->getPrimitiveSizeInBits(); 959 return (SrcBits == 64 && DestBits == 32); 960 } 961 962 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { 963 if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() || 964 !SrcVT.isInteger() || !DstVT.isInteger()) 965 return false; 966 unsigned SrcBits = SrcVT.getSizeInBits(); 967 unsigned DestBits = DstVT.getSizeInBits(); 968 return (SrcBits == 64 && DestBits == 32); 969 } 970 971 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 972 // Zexts are free if they can be combined with a load. 973 if (auto *LD = dyn_cast<LoadSDNode>(Val)) { 974 EVT MemVT = LD->getMemoryVT(); 975 if ((MemVT == MVT::i8 || MemVT == MVT::i16 || 976 (Subtarget.is64Bit() && MemVT == MVT::i32)) && 977 (LD->getExtensionType() == ISD::NON_EXTLOAD || 978 LD->getExtensionType() == ISD::ZEXTLOAD)) 979 return true; 980 } 981 982 return TargetLowering::isZExtFree(Val, VT2); 983 } 984 985 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const { 986 return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64; 987 } 988 989 bool RISCVTargetLowering::isCheapToSpeculateCttz() const { 990 return Subtarget.hasStdExtZbb(); 991 } 992 993 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const { 994 return Subtarget.hasStdExtZbb(); 995 } 996 997 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 998 bool ForCodeSize) const { 999 if (VT == MVT::f16 && !Subtarget.hasStdExtZfh()) 1000 return false; 1001 if (VT == MVT::f32 && !Subtarget.hasStdExtF()) 1002 return false; 1003 if (VT == MVT::f64 && !Subtarget.hasStdExtD()) 1004 return false; 1005 if (Imm.isNegZero()) 1006 return false; 1007 return Imm.isZero(); 1008 } 1009 1010 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const { 1011 return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) || 1012 (VT == MVT::f32 && Subtarget.hasStdExtF()) || 1013 (VT == MVT::f64 && Subtarget.hasStdExtD()); 1014 } 1015 1016 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, 1017 CallingConv::ID CC, 1018 EVT VT) const { 1019 // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still 1020 // end up using a GPR but that will be decided based on ABI. 1021 if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) 1022 return MVT::f32; 1023 1024 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); 1025 } 1026 1027 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, 1028 CallingConv::ID CC, 1029 EVT VT) const { 1030 // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still 1031 // end up using a GPR but that will be decided based on ABI. 1032 if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh()) 1033 return 1; 1034 1035 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); 1036 } 1037 1038 // Changes the condition code and swaps operands if necessary, so the SetCC 1039 // operation matches one of the comparisons supported directly by branches 1040 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare 1041 // with 1/-1. 
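// A few of the concrete rewrites performed below (illustrative):
//   (setgt  X, -1) --> (setge X, 0)
//   (setlt  X,  1) --> (setge 0, X)
//   (setugt X,  Y) --> (setult Y, X)  ; operands swapped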
1042 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS, 1043 ISD::CondCode &CC, SelectionDAG &DAG) { 1044 // Convert X > -1 to X >= 0. 1045 if (CC == ISD::SETGT && isAllOnesConstant(RHS)) { 1046 RHS = DAG.getConstant(0, DL, RHS.getValueType()); 1047 CC = ISD::SETGE; 1048 return; 1049 } 1050 // Convert X < 1 to 0 >= X. 1051 if (CC == ISD::SETLT && isOneConstant(RHS)) { 1052 RHS = LHS; 1053 LHS = DAG.getConstant(0, DL, RHS.getValueType()); 1054 CC = ISD::SETGE; 1055 return; 1056 } 1057 1058 switch (CC) { 1059 default: 1060 break; 1061 case ISD::SETGT: 1062 case ISD::SETLE: 1063 case ISD::SETUGT: 1064 case ISD::SETULE: 1065 CC = ISD::getSetCCSwappedOperands(CC); 1066 std::swap(LHS, RHS); 1067 break; 1068 } 1069 } 1070 1071 // Return the RISC-V branch opcode that matches the given DAG integer 1072 // condition code. The CondCode must be one of those supported by the RISC-V 1073 // ISA (see translateSetCCForBranch). 1074 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) { 1075 switch (CC) { 1076 default: 1077 llvm_unreachable("Unsupported CondCode"); 1078 case ISD::SETEQ: 1079 return RISCV::BEQ; 1080 case ISD::SETNE: 1081 return RISCV::BNE; 1082 case ISD::SETLT: 1083 return RISCV::BLT; 1084 case ISD::SETGE: 1085 return RISCV::BGE; 1086 case ISD::SETULT: 1087 return RISCV::BLTU; 1088 case ISD::SETUGE: 1089 return RISCV::BGEU; 1090 } 1091 } 1092 1093 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) { 1094 assert(VT.isScalableVector() && "Expecting a scalable vector type"); 1095 unsigned KnownSize = VT.getSizeInBits().getKnownMinValue(); 1096 if (VT.getVectorElementType() == MVT::i1) 1097 KnownSize *= 8; 1098 1099 switch (KnownSize) { 1100 default: 1101 llvm_unreachable("Invalid LMUL."); 1102 case 8: 1103 return RISCVII::VLMUL::LMUL_F8; 1104 case 16: 1105 return RISCVII::VLMUL::LMUL_F4; 1106 case 32: 1107 return RISCVII::VLMUL::LMUL_F2; 1108 case 64: 1109 return RISCVII::VLMUL::LMUL_1; 1110 case 128: 1111 return RISCVII::VLMUL::LMUL_2; 1112 case 256: 1113 return RISCVII::VLMUL::LMUL_4; 1114 case 512: 1115 return RISCVII::VLMUL::LMUL_8; 1116 } 1117 } 1118 1119 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) { 1120 switch (LMul) { 1121 default: 1122 llvm_unreachable("Invalid LMUL."); 1123 case RISCVII::VLMUL::LMUL_F8: 1124 case RISCVII::VLMUL::LMUL_F4: 1125 case RISCVII::VLMUL::LMUL_F2: 1126 case RISCVII::VLMUL::LMUL_1: 1127 return RISCV::VRRegClassID; 1128 case RISCVII::VLMUL::LMUL_2: 1129 return RISCV::VRM2RegClassID; 1130 case RISCVII::VLMUL::LMUL_4: 1131 return RISCV::VRM4RegClassID; 1132 case RISCVII::VLMUL::LMUL_8: 1133 return RISCV::VRM8RegClassID; 1134 } 1135 } 1136 1137 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) { 1138 RISCVII::VLMUL LMUL = getLMUL(VT); 1139 if (LMUL == RISCVII::VLMUL::LMUL_F8 || 1140 LMUL == RISCVII::VLMUL::LMUL_F4 || 1141 LMUL == RISCVII::VLMUL::LMUL_F2 || 1142 LMUL == RISCVII::VLMUL::LMUL_1) { 1143 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7, 1144 "Unexpected subreg numbering"); 1145 return RISCV::sub_vrm1_0 + Index; 1146 } 1147 if (LMUL == RISCVII::VLMUL::LMUL_2) { 1148 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3, 1149 "Unexpected subreg numbering"); 1150 return RISCV::sub_vrm2_0 + Index; 1151 } 1152 if (LMUL == RISCVII::VLMUL::LMUL_4) { 1153 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1, 1154 "Unexpected subreg numbering"); 1155 return RISCV::sub_vrm4_0 + Index; 1156 } 1157 llvm_unreachable("Invalid vector type."); 1158 } 1159 1160 
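// Worked examples for getLMUL above (illustrative): nxv4i32 has a known
// minimum size of 128 bits and so maps to LMUL_2 (register class VRM2), while
// nxv1i1 is a single bit, is scaled by 8 to 8 bits, and maps to LMUL_F8 (VR).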
unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) { 1161 if (VT.getVectorElementType() == MVT::i1) 1162 return RISCV::VRRegClassID; 1163 return getRegClassIDForLMUL(getLMUL(VT)); 1164 } 1165 1166 // Attempt to decompose a subvector insert/extract between VecVT and 1167 // SubVecVT via subregister indices. Returns the subregister index that 1168 // can perform the subvector insert/extract with the given element index, as 1169 // well as the index corresponding to any leftover subvectors that must be 1170 // further inserted/extracted within the register class for SubVecVT. 1171 std::pair<unsigned, unsigned> 1172 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 1173 MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, 1174 const RISCVRegisterInfo *TRI) { 1175 static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID && 1176 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID && 1177 RISCV::VRM2RegClassID > RISCV::VRRegClassID), 1178 "Register classes not ordered"); 1179 unsigned VecRegClassID = getRegClassIDForVecVT(VecVT); 1180 unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT); 1181 // Try to compose a subregister index that takes us from the incoming 1182 // LMUL>1 register class down to the outgoing one. At each step we half 1183 // the LMUL: 1184 // nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0 1185 // Note that this is not guaranteed to find a subregister index, such as 1186 // when we are extracting from one VR type to another. 1187 unsigned SubRegIdx = RISCV::NoSubRegister; 1188 for (const unsigned RCID : 1189 {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID}) 1190 if (VecRegClassID > RCID && SubRegClassID <= RCID) { 1191 VecVT = VecVT.getHalfNumVectorElementsVT(); 1192 bool IsHi = 1193 InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue(); 1194 SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, 1195 getSubregIndexByMVT(VecVT, IsHi)); 1196 if (IsHi) 1197 InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue(); 1198 } 1199 return {SubRegIdx, InsertExtractIdx}; 1200 } 1201 1202 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar 1203 // stores for those types. 1204 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const { 1205 return !Subtarget.useRVVForFixedLengthVectors() || 1206 (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1); 1207 } 1208 1209 static bool useRVVForFixedLengthVectorVT(MVT VT, 1210 const RISCVSubtarget &Subtarget) { 1211 assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!"); 1212 if (!Subtarget.useRVVForFixedLengthVectors()) 1213 return false; 1214 1215 // We only support a set of vector types with a consistent maximum fixed size 1216 // across all supported vector element types to avoid legalization issues. 1217 // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest 1218 // fixed-length vector type we support is 1024 bytes. 1219 if (VT.getFixedSizeInBits() > 1024 * 8) 1220 return false; 1221 1222 unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits(); 1223 1224 // Don't use RVV for vectors we cannot scalarize if required. 1225 switch (VT.getVectorElementType().SimpleTy) { 1226 // i1 is supported but has different rules. 1227 default: 1228 return false; 1229 case MVT::i1: 1230 // Masks can only use a single register. 
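    // e.g. (illustrative): with a 128-bit minimum VLEN, v128i1 still fits in
    // one mask register, but v256i1 would be rejected by the check below.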
    if (VT.getVectorNumElements() > MinVLen)
      return false;
    MinVLen /= 8;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    break;
  case MVT::f16:
    if (!Subtarget.hasStdExtZfh())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasStdExtF())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasStdExtD())
      return false;
    break;
  }

  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are setup.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
    // narrower types, but we can't have a fractional LMUL with denominator
    // less than 64/SEW.
    unsigned NumElts =
        divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
1326 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG, 1327 const RISCVSubtarget &Subtarget) { 1328 assert(VT.isFixedLengthVector() && 1329 "Expected to convert into a fixed length vector!"); 1330 assert(V.getValueType().isScalableVector() && 1331 "Expected a scalable vector operand!"); 1332 SDLoc DL(V); 1333 SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 1334 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero); 1335 } 1336 1337 // Gets the two common "VL" operands: an all-ones mask and the vector length. 1338 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is 1339 // the vector type that it is contained in. 1340 static std::pair<SDValue, SDValue> 1341 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG, 1342 const RISCVSubtarget &Subtarget) { 1343 assert(ContainerVT.isScalableVector() && "Expecting scalable container type"); 1344 MVT XLenVT = Subtarget.getXLenVT(); 1345 SDValue VL = VecVT.isFixedLengthVector() 1346 ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT) 1347 : DAG.getRegister(RISCV::X0, XLenVT); 1348 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 1349 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 1350 return {Mask, VL}; 1351 } 1352 1353 // As above but assuming the given type is a scalable vector type. 1354 static std::pair<SDValue, SDValue> 1355 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG, 1356 const RISCVSubtarget &Subtarget) { 1357 assert(VecVT.isScalableVector() && "Expecting a scalable vector"); 1358 return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget); 1359 } 1360 1361 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few 1362 // of either is (currently) supported. This can get us into an infinite loop 1363 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR 1364 // as a ..., etc. 1365 // Until either (or both) of these can reliably lower any node, reporting that 1366 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks 1367 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack, 1368 // which is not desirable. 1369 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles( 1370 EVT VT, unsigned DefinedValues) const { 1371 return false; 1372 } 1373 1374 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { 1375 // Only splats are currently supported. 1376 if (ShuffleVectorSDNode::isSplatMask(M.data(), VT)) 1377 return true; 1378 1379 return false; 1380 } 1381 1382 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG, 1383 const RISCVSubtarget &Subtarget) { 1384 MVT VT = Op.getSimpleValueType(); 1385 assert(VT.isFixedLengthVector() && "Unexpected vector!"); 1386 1387 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); 1388 1389 SDLoc DL(Op); 1390 SDValue Mask, VL; 1391 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1392 1393 unsigned Opc = 1394 VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; 1395 SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL); 1396 return convertFromScalableVector(VT, Splat, DAG, Subtarget); 1397 } 1398 1399 struct VIDSequence { 1400 int64_t Step; 1401 int64_t Addend; 1402 }; 1403 1404 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S] 1405 // to the (non-zero) step S and start value X. 
// This can then be lowered as the RVV sequence (VID * S) + X, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>, however it is left to the caller to
// determine whether this is worth generating code for.
static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
  unsigned NumElts = Op.getNumOperands();
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
  if (!Op.getValueType().isInteger())
    return None;

  Optional<int64_t> SeqStep, SeqAddend;
  Optional<std::pair<uint64_t, unsigned>> PrevElt;
  unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    // Assume undef elements match the sequence; we just have to be careful
    // when interpolating across them.
    if (Op.getOperand(Idx).isUndef())
      continue;
    // The BUILD_VECTOR must be all constants.
    if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
      return None;

    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);

    if (PrevElt) {
      // Calculate the step since the last non-undef element, and ensure
      // it's consistent across the entire sequence.
      int64_t Diff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
      // The difference must cleanly divide the element span.
      if (Diff % (Idx - PrevElt->second) != 0)
        return None;
      int64_t Step = Diff / (Idx - PrevElt->second);
      // A zero step indicates we're either not an index sequence, or we
      // have a fractional step. This must be handled by a more complex
      // pattern recognition (undefs complicate things here).
      if (Step == 0)
        return None;
      if (!SeqStep)
        SeqStep = Step;
      else if (Step != SeqStep)
        return None;
    }

    // Record and/or check any addend.
    if (SeqStep) {
      int64_t Addend =
          SignExtend64(Val - (Idx * (uint64_t)*SeqStep), EltSizeInBits);
      if (!SeqAddend)
        SeqAddend = Addend;
      else if (SeqAddend != Addend)
        return None;
    }

    // Record this non-undef element for later.
    PrevElt = std::make_pair(Val, Idx);
  }
  // We need to have logged both a step and an addend for this to count as
  // a legal index sequence.
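  // Worked examples (illustrative): <1, 4, 7> is matched with Step = 3 and
  // Addend = 1; <0, 2, undef, 6> gives Step = 2, Addend = 0; <0, 1, 3> is
  // rejected because the step is not consistent.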
1465 if (!SeqStep || !SeqAddend) 1466 return None; 1467 1468 return VIDSequence{*SeqStep, *SeqAddend}; 1469 } 1470 1471 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 1472 const RISCVSubtarget &Subtarget) { 1473 MVT VT = Op.getSimpleValueType(); 1474 assert(VT.isFixedLengthVector() && "Unexpected vector!"); 1475 1476 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); 1477 1478 SDLoc DL(Op); 1479 SDValue Mask, VL; 1480 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1481 1482 MVT XLenVT = Subtarget.getXLenVT(); 1483 unsigned NumElts = Op.getNumOperands(); 1484 1485 if (VT.getVectorElementType() == MVT::i1) { 1486 if (ISD::isBuildVectorAllZeros(Op.getNode())) { 1487 SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL); 1488 return convertFromScalableVector(VT, VMClr, DAG, Subtarget); 1489 } 1490 1491 if (ISD::isBuildVectorAllOnes(Op.getNode())) { 1492 SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL); 1493 return convertFromScalableVector(VT, VMSet, DAG, Subtarget); 1494 } 1495 1496 // Lower constant mask BUILD_VECTORs via an integer vector type, in 1497 // scalar integer chunks whose bit-width depends on the number of mask 1498 // bits and XLEN. 1499 // First, determine the most appropriate scalar integer type to use. This 1500 // is at most XLenVT, but may be shrunk to a smaller vector element type 1501 // according to the size of the final vector - use i8 chunks rather than 1502 // XLenVT if we're producing a v8i1. This results in more consistent 1503 // codegen across RV32 and RV64. 1504 unsigned NumViaIntegerBits = 1505 std::min(std::max(NumElts, 8u), Subtarget.getXLen()); 1506 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) { 1507 // If we have to use more than one INSERT_VECTOR_ELT then this 1508 // optimization is likely to increase code size; avoid performing it in 1509 // such a case. We can use a load from a constant pool instead. 1510 if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits) 1511 return SDValue(); 1512 // Now we can create our integer vector type. Note that it may be larger 1513 // than the resulting mask type: v4i1 would use v1i8 as its integer type. 1514 MVT IntegerViaVecVT = 1515 MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits), 1516 divideCeil(NumElts, NumViaIntegerBits)); 1517 1518 uint64_t Bits = 0; 1519 unsigned BitPos = 0, IntegerEltIdx = 0; 1520 SDValue Vec = DAG.getUNDEF(IntegerViaVecVT); 1521 1522 for (unsigned I = 0; I < NumElts; I++, BitPos++) { 1523 // Once we accumulate enough bits to fill our scalar type, insert into 1524 // our vector and clear our accumulated data. 1525 if (I != 0 && I % NumViaIntegerBits == 0) { 1526 if (NumViaIntegerBits <= 32) 1527 Bits = SignExtend64(Bits, 32); 1528 SDValue Elt = DAG.getConstant(Bits, DL, XLenVT); 1529 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, 1530 Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT)); 1531 Bits = 0; 1532 BitPos = 0; 1533 IntegerEltIdx++; 1534 } 1535 SDValue V = Op.getOperand(I); 1536 bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue(); 1537 Bits |= ((uint64_t)BitValue << BitPos); 1538 } 1539 1540 // Insert the (remaining) scalar value into position in our integer 1541 // vector type.
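// (For example, the v4i1 constant <1, 0, 1, 1> packs into the single i8 value 0b1101, held in a v1i8 which is later bitcast and extracted back to v4i1.)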
1542 if (NumViaIntegerBits <= 32) 1543 Bits = SignExtend64(Bits, 32); 1544 SDValue Elt = DAG.getConstant(Bits, DL, XLenVT); 1545 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt, 1546 DAG.getConstant(IntegerEltIdx, DL, XLenVT)); 1547 1548 if (NumElts < NumViaIntegerBits) { 1549 // If we're producing a smaller vector than our minimum legal integer 1550 // type, bitcast to the equivalent (known-legal) mask type, and extract 1551 // our final mask. 1552 assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type"); 1553 Vec = DAG.getBitcast(MVT::v8i1, Vec); 1554 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec, 1555 DAG.getConstant(0, DL, XLenVT)); 1556 } else { 1557 // Else we must have produced an integer type with the same size as the 1558 // mask type; bitcast for the final result. 1559 assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits()); 1560 Vec = DAG.getBitcast(VT, Vec); 1561 } 1562 1563 return Vec; 1564 } 1565 1566 // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask 1567 // vector type, we have a legal equivalently-sized i8 type, so we can use 1568 // that. 1569 MVT WideVecVT = VT.changeVectorElementType(MVT::i8); 1570 SDValue VecZero = DAG.getConstant(0, DL, WideVecVT); 1571 1572 SDValue WideVec; 1573 if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) { 1574 // For a splat, perform a scalar truncate before creating the wider 1575 // vector. 1576 assert(Splat.getValueType() == XLenVT && 1577 "Unexpected type for i1 splat value"); 1578 Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat, 1579 DAG.getConstant(1, DL, XLenVT)); 1580 WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat); 1581 } else { 1582 SmallVector<SDValue, 8> Ops(Op->op_values()); 1583 WideVec = DAG.getBuildVector(WideVecVT, DL, Ops); 1584 SDValue VecOne = DAG.getConstant(1, DL, WideVecVT); 1585 WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne); 1586 } 1587 1588 return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE); 1589 } 1590 1591 if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) { 1592 unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL 1593 : RISCVISD::VMV_V_X_VL; 1594 Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL); 1595 return convertFromScalableVector(VT, Splat, DAG, Subtarget); 1596 } 1597 1598 // Try and match index sequences, which we can lower to the vid instruction 1599 // with optional modifications. An all-undef vector is matched by 1600 // getSplatValue, above. 1601 if (auto SimpleVID = isSimpleVIDSequence(Op)) { 1602 int64_t Step = SimpleVID->Step; 1603 int64_t Addend = SimpleVID->Addend; 1604 // Only emit VIDs with suitably-small steps/addends. We use imm5 as the 1605 // threshold since it's the immediate value many RVV instructions accept. 1606 if (isInt<5>(Step) && isInt<5>(Addend)) { 1607 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL); 1608 // Convert right out of the scalable type so we can use standard ISD 1609 // nodes for the rest of the computation. If we used scalable types with 1610 // these, we'd lose the fixed-length vector info and generate worse 1611 // vsetvli code.
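// For example, the sequence <1, 3, 5, 7> is emitted as (vid << 1) + 1 using a splatted shift amount and addend.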
1612 VID = convertFromScalableVector(VT, VID, DAG, Subtarget); 1613 assert(Step != 0 && "Invalid step"); 1614 bool Negate = false; 1615 if (Step != 1) { 1616 int64_t SplatStepVal = Step; 1617 unsigned Opcode = ISD::MUL; 1618 if (isPowerOf2_64(std::abs(Step))) { 1619 Negate = Step < 0; 1620 Opcode = ISD::SHL; 1621 SplatStepVal = Log2_64(std::abs(Step)); 1622 } 1623 SDValue SplatStep = DAG.getSplatVector( 1624 VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT)); 1625 VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep); 1626 } 1627 if (Addend != 0 || Negate) { 1628 SDValue SplatAddend = 1629 DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT)); 1630 VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID); 1631 } 1632 return VID; 1633 } 1634 } 1635 1636 // Attempt to detect "hidden" splats, which only reveal themselves as splats 1637 // when re-interpreted as a vector with a larger element type. For example, 1638 // v4i16 = build_vector i16 0, i16 1, i16 0, i16 1 1639 // could instead be splat as 1640 // v2i32 = build_vector i32 0x00010000, i32 0x00010000 1641 // TODO: This optimization could also work on non-constant splats, but it 1642 // would require bit-manipulation instructions to construct the splat value. 1643 SmallVector<SDValue> Sequence; 1644 unsigned EltBitSize = VT.getScalarSizeInBits(); 1645 const auto *BV = cast<BuildVectorSDNode>(Op); 1646 if (VT.isInteger() && EltBitSize < 64 && 1647 ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) && 1648 BV->getRepeatedSequence(Sequence) && 1649 (Sequence.size() * EltBitSize) <= 64) { 1650 unsigned SeqLen = Sequence.size(); 1651 MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen); 1652 MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen); 1653 assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 || 1654 ViaIntVT == MVT::i64) && 1655 "Unexpected sequence type"); 1656 1657 unsigned EltIdx = 0; 1658 uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize); 1659 uint64_t SplatValue = 0; 1660 // Construct the amalgamated value which can be splatted as this larger 1661 // vector type. 1662 for (const auto &SeqV : Sequence) { 1663 if (!SeqV.isUndef()) 1664 SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask) 1665 << (EltIdx * EltBitSize)); 1666 EltIdx++; 1667 } 1668 1669 // On RV64, sign-extend from 32 to 64 bits where possible in order to 1670 // achieve better constant materialization. 1671 if (Subtarget.is64Bit() && ViaIntVT == MVT::i32) 1672 SplatValue = SignExtend64(SplatValue, 32); 1673 1674 // Since we can't introduce illegal i64 types at this stage, we can only 1675 // perform an i64 splat on RV32 if it is its own sign-extended value. That 1676 // way we can use RVV instructions to splat. 1677 assert((ViaIntVT.bitsLE(XLenVT) || 1678 (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) && 1679 "Unexpected bitcast sequence"); 1680 if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) { 1681 SDValue ViaVL = 1682 DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT); 1683 MVT ViaContainerVT = 1684 getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget); 1685 SDValue Splat = 1686 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT, 1687 DAG.getConstant(SplatValue, DL, XLenVT), ViaVL); 1688 Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget); 1689 return DAG.getBitcast(VT, Splat); 1690 } 1691 } 1692 1693 // Try and optimize BUILD_VECTORs with "dominant values" - these are values 1694 // which constitute a large proportion of the elements.
In such cases we can 1695 // splat a vector with the dominant element and make up the shortfall with 1696 // INSERT_VECTOR_ELTs. 1697 // Note that this includes vectors of 2 elements by association. The 1698 // upper-most element is the "dominant" one, allowing us to use a splat to 1699 // "insert" the upper element, and an insert of the lower element at position 1700 // 0, which improves codegen. 1701 SDValue DominantValue; 1702 unsigned MostCommonCount = 0; 1703 DenseMap<SDValue, unsigned> ValueCounts; 1704 unsigned NumUndefElts = 1705 count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); }); 1706 1707 for (SDValue V : Op->op_values()) { 1708 if (V.isUndef()) 1709 continue; 1710 1711 ValueCounts.insert(std::make_pair(V, 0)); 1712 unsigned &Count = ValueCounts[V]; 1713 1714 // Is this value dominant? In case of a tie, prefer the highest element as 1715 // it's cheaper to insert near the beginning of a vector than it is at the 1716 // end. 1717 if (++Count >= MostCommonCount) { 1718 DominantValue = V; 1719 MostCommonCount = Count; 1720 } 1721 } 1722 1723 assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR"); 1724 unsigned NumDefElts = NumElts - NumUndefElts; 1725 unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2; 1726 1727 // Don't perform this optimization when optimizing for size, since 1728 // materializing elements and inserting them tends to cause code bloat. 1729 if (!DAG.shouldOptForSize() && 1730 ((MostCommonCount > DominantValueCountThreshold) || 1731 (ValueCounts.size() <= Log2_32(NumDefElts)))) { 1732 // Start by splatting the most common element. 1733 SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue); 1734 1735 DenseSet<SDValue> Processed{DominantValue}; 1736 MVT SelMaskTy = VT.changeVectorElementType(MVT::i1); 1737 for (const auto &OpIdx : enumerate(Op->ops())) { 1738 const SDValue &V = OpIdx.value(); 1739 if (V.isUndef() || !Processed.insert(V).second) 1740 continue; 1741 if (ValueCounts[V] == 1) { 1742 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, 1743 DAG.getConstant(OpIdx.index(), DL, XLenVT)); 1744 } else { 1745 // Blend in all instances of this value using a VSELECT, using a 1746 // mask where each bit signals whether that element is the one 1747 // we're after. 1748 SmallVector<SDValue> Ops; 1749 transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) { 1750 return DAG.getConstant(V == V1, DL, XLenVT); 1751 }); 1752 Vec = DAG.getNode(ISD::VSELECT, DL, VT, 1753 DAG.getBuildVector(SelMaskTy, DL, Ops), 1754 DAG.getSplatBuildVector(VT, DL, V), Vec); 1755 } 1756 } 1757 1758 return Vec; 1759 } 1760 1761 return SDValue(); 1762 } 1763 1764 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo, 1765 SDValue Hi, SDValue VL, SelectionDAG &DAG) { 1766 if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { 1767 int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); 1768 int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); 1769 // If Hi constant is all the same sign bit as Lo, lower this as a custom 1770 // node in order to try and match RVV vector/scalar instructions. 1771 if ((LoC >> 31) == HiC) 1772 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL); 1773 } 1774 1775 // Fall back to a stack store and stride x0 vector load. 1776 return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL); 1777 } 1778 1779 // Called by type legalization to handle splat of i64 on RV32. 
1780 // FIXME: We can optimize this when the type has sign or zero bits in one 1781 // of the halves. 1782 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar, 1783 SDValue VL, SelectionDAG &DAG) { 1784 assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!"); 1785 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 1786 DAG.getConstant(0, DL, MVT::i32)); 1787 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 1788 DAG.getConstant(1, DL, MVT::i32)); 1789 return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG); 1790 } 1791 1792 // This function lowers a splat of a scalar operand Scalar with the vector 1793 // length VL. It ensures the final sequence is type legal, which is useful when 1794 // lowering a splat after type legalization. 1795 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL, 1796 SelectionDAG &DAG, 1797 const RISCVSubtarget &Subtarget) { 1798 if (VT.isFloatingPoint()) 1799 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL); 1800 1801 MVT XLenVT = Subtarget.getXLenVT(); 1802 1803 // Simplest case is that the operand needs to be promoted to XLenVT. 1804 if (Scalar.getValueType().bitsLE(XLenVT)) { 1805 // If the operand is a constant, sign extend to increase our chances 1806 // of being able to use a .vi instruction. ANY_EXTEND would become a 1807 // zero extend and the simm5 check in isel would fail. 1808 // FIXME: Should we ignore the upper bits in isel instead? 1809 unsigned ExtOpc = 1810 isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND; 1811 Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar); 1812 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL); 1813 } 1814 1815 assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 && 1816 "Unexpected scalar for splat lowering!"); 1817 1818 // Otherwise use the more complicated splatting algorithm. 1819 return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG); 1820 } 1821 1822 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, 1823 const RISCVSubtarget &Subtarget) { 1824 SDValue V1 = Op.getOperand(0); 1825 SDValue V2 = Op.getOperand(1); 1826 SDLoc DL(Op); 1827 MVT XLenVT = Subtarget.getXLenVT(); 1828 MVT VT = Op.getSimpleValueType(); 1829 unsigned NumElts = VT.getVectorNumElements(); 1830 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 1831 1832 MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget); 1833 1834 SDValue TrueMask, VL; 1835 std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1836 1837 if (SVN->isSplat()) { 1838 const int Lane = SVN->getSplatIndex(); 1839 if (Lane >= 0) { 1840 MVT SVT = VT.getVectorElementType(); 1841 1842 // Turn splatted vector load into a strided load with an X0 stride. 1843 SDValue V = V1; 1844 // Peek through CONCAT_VECTORS as VectorCombine can concat a vector 1845 // with undef. 1846 // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts? 1847 int Offset = Lane; 1848 if (V.getOpcode() == ISD::CONCAT_VECTORS) { 1849 int OpElements = 1850 V.getOperand(0).getSimpleValueType().getVectorNumElements(); 1851 V = V.getOperand(Offset / OpElements); 1852 Offset %= OpElements; 1853 } 1854 1855 // We need to ensure the load isn't atomic or volatile.
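// (Rewriting a volatile or atomic load as a strided or offset scalar load could change its observable behaviour.)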
1856 if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) { 1857 auto *Ld = cast<LoadSDNode>(V); 1858 Offset *= SVT.getStoreSize(); 1859 SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(), 1860 TypeSize::Fixed(Offset), DL); 1861 1862 // If this is SEW=64 on RV32, use a strided load with a stride of x0. 1863 if (SVT.isInteger() && SVT.bitsGT(XLenVT)) { 1864 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 1865 SDValue IntID = 1866 DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT); 1867 SDValue Ops[] = {Ld->getChain(), IntID, NewAddr, 1868 DAG.getRegister(RISCV::X0, XLenVT), VL}; 1869 SDValue NewLoad = DAG.getMemIntrinsicNode( 1870 ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT, 1871 DAG.getMachineFunction().getMachineMemOperand( 1872 Ld->getMemOperand(), Offset, SVT.getStoreSize())); 1873 DAG.makeEquivalentMemoryOrdering(Ld, NewLoad); 1874 return convertFromScalableVector(VT, NewLoad, DAG, Subtarget); 1875 } 1876 1877 // Otherwise use a scalar load and splat. This will give the best 1878 // opportunity to fold a splat into the operation. ISel can turn it into 1879 // the x0 strided load if we aren't able to fold away the select. 1880 if (SVT.isFloatingPoint()) 1881 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr, 1882 Ld->getPointerInfo().getWithOffset(Offset), 1883 Ld->getOriginalAlign(), 1884 Ld->getMemOperand()->getFlags()); 1885 else 1886 V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr, 1887 Ld->getPointerInfo().getWithOffset(Offset), SVT, 1888 Ld->getOriginalAlign(), 1889 Ld->getMemOperand()->getFlags()); 1890 DAG.makeEquivalentMemoryOrdering(Ld, V); 1891 1892 unsigned Opc = 1893 VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL; 1894 SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL); 1895 return convertFromScalableVector(VT, Splat, DAG, Subtarget); 1896 } 1897 1898 V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); 1899 assert(Lane < (int)NumElts && "Unexpected lane!"); 1900 SDValue Gather = 1901 DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1, 1902 DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL); 1903 return convertFromScalableVector(VT, Gather, DAG, Subtarget); 1904 } 1905 } 1906 1907 // Detect shuffles which can be re-expressed as vector selects; these are 1908 // shuffles in which each element in the destination is taken from an element 1909 // at the corresponding index in either source vectors. 1910 bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) { 1911 int MaskIndex = MaskIdx.value(); 1912 return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts; 1913 }); 1914 1915 assert(!V1.isUndef() && "Unexpected shuffle canonicalization"); 1916 1917 SmallVector<SDValue> MaskVals; 1918 // As a backup, shuffles can be lowered via a vrgather instruction, possibly 1919 // merged with a second vrgather. 1920 SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS; 1921 1922 // By default we preserve the original operand order, and use a mask to 1923 // select LHS as true and RHS as false. However, since RVV vector selects may 1924 // feature splats but only on the LHS, we may choose to invert our mask and 1925 // instead select between RHS and LHS. 1926 bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1); 1927 bool InvertMask = IsSelect == SwapOps; 1928 1929 // Now construct the mask that will be used by the vselect or blended 1930 // vrgather operation. For vrgathers, construct the appropriate indices into 1931 // each vector. 
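// For example, with NumElts=4 the mask <0, 4, 1, 5> produces GatherIndicesLHS <0, undef, 1, undef> and GatherIndicesRHS <undef, 0, undef, 1>, with the select mask marking which lanes take the LHS result.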
1932 for (int MaskIndex : SVN->getMask()) { 1933 bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask; 1934 MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT)); 1935 if (!IsSelect) { 1936 bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts; 1937 GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0 1938 ? DAG.getConstant(MaskIndex, DL, XLenVT) 1939 : DAG.getUNDEF(XLenVT)); 1940 GatherIndicesRHS.push_back( 1941 IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT) 1942 : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT)); 1943 } 1944 } 1945 1946 if (SwapOps) { 1947 std::swap(V1, V2); 1948 std::swap(GatherIndicesLHS, GatherIndicesRHS); 1949 } 1950 1951 assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle"); 1952 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts); 1953 SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals); 1954 1955 if (IsSelect) 1956 return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2); 1957 1958 if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) { 1959 // On such a large vector we're unable to use i8 as the index type. 1960 // FIXME: We could promote the index to i16 and use vrgatherei16, but that 1961 // may involve vector splitting if we're already at LMUL=8, or our 1962 // user-supplied maximum fixed-length LMUL. 1963 return SDValue(); 1964 } 1965 1966 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL; 1967 MVT IndexVT = VT.changeTypeToInteger(); 1968 // Since we can't introduce illegal index types at this stage, use i16 and 1969 // vrgatherei16 if the corresponding index type for plain vrgather is greater 1970 // than XLenVT. 1971 if (IndexVT.getScalarType().bitsGT(XLenVT)) { 1972 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL; 1973 IndexVT = IndexVT.changeVectorElementType(MVT::i16); 1974 } 1975 1976 MVT IndexContainerVT = 1977 ContainerVT.changeVectorElementType(IndexVT.getScalarType()); 1978 1979 SDValue Gather; 1980 // TODO: This doesn't trigger for i64 vectors on RV32, since there we 1981 // encounter a bitcasted BUILD_VECTOR with low/high i32 values. 1982 if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) { 1983 Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget); 1984 } else { 1985 SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS); 1986 LHSIndices = 1987 convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget); 1988 1989 V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget); 1990 Gather = 1991 DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL); 1992 } 1993 1994 // If a second vector operand is used by this shuffle, blend it in with an 1995 // additional vrgather. 
1996 if (!V2.isUndef()) { 1997 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); 1998 SelectMask = 1999 convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget); 2000 2001 SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS); 2002 RHSIndices = 2003 convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget); 2004 2005 V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget); 2006 V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL); 2007 Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2, 2008 Gather, VL); 2009 } 2010 2011 return convertFromScalableVector(VT, Gather, DAG, Subtarget); 2012 } 2013 2014 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT, 2015 SDLoc DL, SelectionDAG &DAG, 2016 const RISCVSubtarget &Subtarget) { 2017 if (VT.isScalableVector()) 2018 return DAG.getFPExtendOrRound(Op, DL, VT); 2019 assert(VT.isFixedLengthVector() && 2020 "Unexpected value type for RVV FP extend/round lowering"); 2021 SDValue Mask, VL; 2022 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2023 unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType()) 2024 ? RISCVISD::FP_EXTEND_VL 2025 : RISCVISD::FP_ROUND_VL; 2026 return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL); 2027 } 2028 2029 // While RVV has alignment restrictions, we should always be able to load as a 2030 // legal equivalently-sized byte-typed vector instead. This method is 2031 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If 2032 // the load is already correctly-aligned, it returns SDValue(). 2033 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op, 2034 SelectionDAG &DAG) const { 2035 auto *Load = cast<LoadSDNode>(Op); 2036 assert(Load && Load->getMemoryVT().isVector() && "Expected vector load"); 2037 2038 if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 2039 Load->getMemoryVT(), 2040 *Load->getMemOperand())) 2041 return SDValue(); 2042 2043 SDLoc DL(Op); 2044 MVT VT = Op.getSimpleValueType(); 2045 unsigned EltSizeBits = VT.getScalarSizeInBits(); 2046 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) && 2047 "Unexpected unaligned RVV load type"); 2048 MVT NewVT = 2049 MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8)); 2050 assert(NewVT.isValid() && 2051 "Expecting equally-sized RVV vector types to be legal"); 2052 SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(), 2053 Load->getPointerInfo(), Load->getOriginalAlign(), 2054 Load->getMemOperand()->getFlags()); 2055 return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL); 2056 } 2057 2058 // While RVV has alignment restrictions, we should always be able to store as a 2059 // legal equivalently-sized byte-typed vector instead. This method is 2060 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It 2061 // returns SDValue() if the store is already correctly aligned. 
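// For example, an underaligned v4i32 store is rewritten as a v16i8 store of the bitcast value.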
2062 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op, 2063 SelectionDAG &DAG) const { 2064 auto *Store = cast<StoreSDNode>(Op); 2065 assert(Store && Store->getValue().getValueType().isVector() && 2066 "Expected vector store"); 2067 2068 if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 2069 Store->getMemoryVT(), 2070 *Store->getMemOperand())) 2071 return SDValue(); 2072 2073 SDLoc DL(Op); 2074 SDValue StoredVal = Store->getValue(); 2075 MVT VT = StoredVal.getSimpleValueType(); 2076 unsigned EltSizeBits = VT.getScalarSizeInBits(); 2077 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) && 2078 "Unexpected unaligned RVV store type"); 2079 MVT NewVT = 2080 MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8)); 2081 assert(NewVT.isValid() && 2082 "Expecting equally-sized RVV vector types to be legal"); 2083 StoredVal = DAG.getBitcast(NewVT, StoredVal); 2084 return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(), 2085 Store->getPointerInfo(), Store->getOriginalAlign(), 2086 Store->getMemOperand()->getFlags()); 2087 } 2088 2089 SDValue RISCVTargetLowering::LowerOperation(SDValue Op, 2090 SelectionDAG &DAG) const { 2091 switch (Op.getOpcode()) { 2092 default: 2093 report_fatal_error("unimplemented operand"); 2094 case ISD::GlobalAddress: 2095 return lowerGlobalAddress(Op, DAG); 2096 case ISD::BlockAddress: 2097 return lowerBlockAddress(Op, DAG); 2098 case ISD::ConstantPool: 2099 return lowerConstantPool(Op, DAG); 2100 case ISD::JumpTable: 2101 return lowerJumpTable(Op, DAG); 2102 case ISD::GlobalTLSAddress: 2103 return lowerGlobalTLSAddress(Op, DAG); 2104 case ISD::SELECT: 2105 return lowerSELECT(Op, DAG); 2106 case ISD::BRCOND: 2107 return lowerBRCOND(Op, DAG); 2108 case ISD::VASTART: 2109 return lowerVASTART(Op, DAG); 2110 case ISD::FRAMEADDR: 2111 return lowerFRAMEADDR(Op, DAG); 2112 case ISD::RETURNADDR: 2113 return lowerRETURNADDR(Op, DAG); 2114 case ISD::SHL_PARTS: 2115 return lowerShiftLeftParts(Op, DAG); 2116 case ISD::SRA_PARTS: 2117 return lowerShiftRightParts(Op, DAG, true); 2118 case ISD::SRL_PARTS: 2119 return lowerShiftRightParts(Op, DAG, false); 2120 case ISD::BITCAST: { 2121 SDLoc DL(Op); 2122 EVT VT = Op.getValueType(); 2123 SDValue Op0 = Op.getOperand(0); 2124 EVT Op0VT = Op0.getValueType(); 2125 MVT XLenVT = Subtarget.getXLenVT(); 2126 if (VT.isFixedLengthVector()) { 2127 // We can handle fixed length vector bitcasts with a simple replacement 2128 // in isel. 2129 if (Op0VT.isFixedLengthVector()) 2130 return Op; 2131 // When bitcasting from scalar to fixed-length vector, insert the scalar 2132 // into a one-element vector of the result type, and perform a vector 2133 // bitcast. 2134 if (!Op0VT.isVector()) { 2135 auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1); 2136 return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT, 2137 DAG.getUNDEF(BVT), Op0, 2138 DAG.getConstant(0, DL, XLenVT))); 2139 } 2140 return SDValue(); 2141 } 2142 // Custom-legalize bitcasts from fixed-length vector types to scalar types 2143 // thus: bitcast the vector to a one-element vector type whose element type 2144 // is the same as the result type, and extract the first element. 
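// For example, (i64 (bitcast v4i16 %x)) becomes (extractelt (v1i64 (bitcast %x)), 0).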
2145 if (!VT.isVector() && Op0VT.isFixedLengthVector()) { 2146 LLVMContext &Context = *DAG.getContext(); 2147 SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0); 2148 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, 2149 DAG.getConstant(0, DL, XLenVT)); 2150 } 2151 if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) { 2152 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0); 2153 SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0); 2154 return FPConv; 2155 } 2156 if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() && 2157 Subtarget.hasStdExtF()) { 2158 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 2159 SDValue FPConv = 2160 DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0); 2161 return FPConv; 2162 } 2163 return SDValue(); 2164 } 2165 case ISD::INTRINSIC_WO_CHAIN: 2166 return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2167 case ISD::INTRINSIC_W_CHAIN: 2168 return LowerINTRINSIC_W_CHAIN(Op, DAG); 2169 case ISD::BSWAP: 2170 case ISD::BITREVERSE: { 2171 // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining. 2172 assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 2173 MVT VT = Op.getSimpleValueType(); 2174 SDLoc DL(Op); 2175 // Start with the maximum immediate value which is the bitwidth - 1. 2176 unsigned Imm = VT.getSizeInBits() - 1; 2177 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. 2178 if (Op.getOpcode() == ISD::BSWAP) 2179 Imm &= ~0x7U; 2180 return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0), 2181 DAG.getConstant(Imm, DL, VT)); 2182 } 2183 case ISD::FSHL: 2184 case ISD::FSHR: { 2185 MVT VT = Op.getSimpleValueType(); 2186 assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization"); 2187 SDLoc DL(Op); 2188 if (Op.getOperand(2).getOpcode() == ISD::Constant) 2189 return Op; 2190 // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only 2191 // use log2(XLen) bits. Mask the shift amount accordingly. 2192 unsigned ShAmtWidth = Subtarget.getXLen() - 1; 2193 SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2), 2194 DAG.getConstant(ShAmtWidth, DL, VT)); 2195 unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR; 2196 return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt); 2197 } 2198 case ISD::TRUNCATE: { 2199 SDLoc DL(Op); 2200 MVT VT = Op.getSimpleValueType(); 2201 // Only custom-lower vector truncates. 2202 if (!VT.isVector()) 2203 return Op; 2204 2205 // Truncates to mask types are handled differently. 2206 if (VT.getVectorElementType() == MVT::i1) 2207 return lowerVectorMaskTrunc(Op, DAG); 2208 2209 // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary 2210 // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which 2211 // truncate by one power of two at a time.
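// For example, an i64-to-i8 element truncate is emitted as three back-to-back halvings: i64->i32, i32->i16, i16->i8.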
2212 MVT DstEltVT = VT.getVectorElementType(); 2213 2214 SDValue Src = Op.getOperand(0); 2215 MVT SrcVT = Src.getSimpleValueType(); 2216 MVT SrcEltVT = SrcVT.getVectorElementType(); 2217 2218 assert(DstEltVT.bitsLT(SrcEltVT) && 2219 isPowerOf2_64(DstEltVT.getSizeInBits()) && 2220 isPowerOf2_64(SrcEltVT.getSizeInBits()) && 2221 "Unexpected vector truncate lowering"); 2222 2223 MVT ContainerVT = SrcVT; 2224 if (SrcVT.isFixedLengthVector()) { 2225 ContainerVT = getContainerForFixedLengthVector(SrcVT); 2226 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 2227 } 2228 2229 SDValue Result = Src; 2230 SDValue Mask, VL; 2231 std::tie(Mask, VL) = 2232 getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); 2233 LLVMContext &Context = *DAG.getContext(); 2234 const ElementCount Count = ContainerVT.getVectorElementCount(); 2235 do { 2236 SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2); 2237 EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count); 2238 Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result, 2239 Mask, VL); 2240 } while (SrcEltVT != DstEltVT); 2241 2242 if (SrcVT.isFixedLengthVector()) 2243 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 2244 2245 return Result; 2246 } 2247 case ISD::ANY_EXTEND: 2248 case ISD::ZERO_EXTEND: 2249 if (Op.getOperand(0).getValueType().isVector() && 2250 Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 2251 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1); 2252 return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL); 2253 case ISD::SIGN_EXTEND: 2254 if (Op.getOperand(0).getValueType().isVector() && 2255 Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 2256 return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1); 2257 return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL); 2258 case ISD::SPLAT_VECTOR_PARTS: 2259 return lowerSPLAT_VECTOR_PARTS(Op, DAG); 2260 case ISD::INSERT_VECTOR_ELT: 2261 return lowerINSERT_VECTOR_ELT(Op, DAG); 2262 case ISD::EXTRACT_VECTOR_ELT: 2263 return lowerEXTRACT_VECTOR_ELT(Op, DAG); 2264 case ISD::VSCALE: { 2265 MVT VT = Op.getSimpleValueType(); 2266 SDLoc DL(Op); 2267 SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT); 2268 // We define our scalable vector types for lmul=1 to use a 64 bit known 2269 // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate 2270 // vscale as VLENB / 8. 2271 assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!"); 2272 if (isa<ConstantSDNode>(Op.getOperand(0))) { 2273 // We assume VLENB is a multiple of 8. We manually choose the best shift 2274 // here because SimplifyDemandedBits isn't always able to simplify it. 2275 uint64_t Val = Op.getConstantOperandVal(0); 2276 if (isPowerOf2_64(Val)) { 2277 uint64_t Log2 = Log2_64(Val); 2278 if (Log2 < 3) 2279 return DAG.getNode(ISD::SRL, DL, VT, VLENB, 2280 DAG.getConstant(3 - Log2, DL, VT)); 2281 if (Log2 > 3) 2282 return DAG.getNode(ISD::SHL, DL, VT, VLENB, 2283 DAG.getConstant(Log2 - 3, DL, VT)); 2284 return VLENB; 2285 } 2286 // If the multiplier is a multiple of 8, scale it down to avoid needing 2287 // to shift the VLENB value. 
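// For example, (vscale * 24) becomes VLENB * 3 rather than (VLENB >> 3) * 24.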
2288 if ((Val % 8) == 0) 2289 return DAG.getNode(ISD::MUL, DL, VT, VLENB, 2290 DAG.getConstant(Val / 8, DL, VT)); 2291 } 2292 2293 SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB, 2294 DAG.getConstant(3, DL, VT)); 2295 return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0)); 2296 } 2297 case ISD::FP_EXTEND: { 2298 // RVV can only do fp_extend to types double the size as the source. We 2299 // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going 2300 // via f32. 2301 SDLoc DL(Op); 2302 MVT VT = Op.getSimpleValueType(); 2303 SDValue Src = Op.getOperand(0); 2304 MVT SrcVT = Src.getSimpleValueType(); 2305 2306 // Prepare any fixed-length vector operands. 2307 MVT ContainerVT = VT; 2308 if (SrcVT.isFixedLengthVector()) { 2309 ContainerVT = getContainerForFixedLengthVector(VT); 2310 MVT SrcContainerVT = 2311 ContainerVT.changeVectorElementType(SrcVT.getVectorElementType()); 2312 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 2313 } 2314 2315 if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 || 2316 SrcVT.getVectorElementType() != MVT::f16) { 2317 // For scalable vectors, we only need to close the gap between 2318 // vXf16->vXf64. 2319 if (!VT.isFixedLengthVector()) 2320 return Op; 2321 // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version. 2322 Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); 2323 return convertFromScalableVector(VT, Src, DAG, Subtarget); 2324 } 2325 2326 MVT InterVT = VT.changeVectorElementType(MVT::f32); 2327 MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32); 2328 SDValue IntermediateExtend = getRVVFPExtendOrRound( 2329 Src, InterVT, InterContainerVT, DL, DAG, Subtarget); 2330 2331 SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT, 2332 DL, DAG, Subtarget); 2333 if (VT.isFixedLengthVector()) 2334 return convertFromScalableVector(VT, Extend, DAG, Subtarget); 2335 return Extend; 2336 } 2337 case ISD::FP_ROUND: { 2338 // RVV can only do fp_round to types half the size as the source. We 2339 // custom-lower f64->f16 rounds via RVV's round-to-odd float 2340 // conversion instruction. 2341 SDLoc DL(Op); 2342 MVT VT = Op.getSimpleValueType(); 2343 SDValue Src = Op.getOperand(0); 2344 MVT SrcVT = Src.getSimpleValueType(); 2345 2346 // Prepare any fixed-length vector operands. 2347 MVT ContainerVT = VT; 2348 if (VT.isFixedLengthVector()) { 2349 MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 2350 ContainerVT = 2351 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 2352 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 2353 } 2354 2355 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 || 2356 SrcVT.getVectorElementType() != MVT::f64) { 2357 // For scalable vectors, we only need to close the gap between 2358 // vXf64<->vXf16. 2359 if (!VT.isFixedLengthVector()) 2360 return Op; 2361 // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version. 
2362 Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget); 2363 return convertFromScalableVector(VT, Src, DAG, Subtarget); 2364 } 2365 2366 SDValue Mask, VL; 2367 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2368 2369 MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32); 2370 SDValue IntermediateRound = 2371 DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL); 2372 SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT, 2373 DL, DAG, Subtarget); 2374 2375 if (VT.isFixedLengthVector()) 2376 return convertFromScalableVector(VT, Round, DAG, Subtarget); 2377 return Round; 2378 } 2379 case ISD::FP_TO_SINT: 2380 case ISD::FP_TO_UINT: 2381 case ISD::SINT_TO_FP: 2382 case ISD::UINT_TO_FP: { 2383 // RVV can only do fp<->int conversions to types half/double the size as 2384 // the source. We custom-lower any conversions that do two hops into 2385 // sequences. 2386 MVT VT = Op.getSimpleValueType(); 2387 if (!VT.isVector()) 2388 return Op; 2389 SDLoc DL(Op); 2390 SDValue Src = Op.getOperand(0); 2391 MVT EltVT = VT.getVectorElementType(); 2392 MVT SrcVT = Src.getSimpleValueType(); 2393 MVT SrcEltVT = SrcVT.getVectorElementType(); 2394 unsigned EltSize = EltVT.getSizeInBits(); 2395 unsigned SrcEltSize = SrcEltVT.getSizeInBits(); 2396 assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) && 2397 "Unexpected vector element types"); 2398 2399 bool IsInt2FP = SrcEltVT.isInteger(); 2400 // Widening conversions 2401 if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) { 2402 if (IsInt2FP) { 2403 // Do a regular integer sign/zero extension then convert to float. 2404 MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()), 2405 VT.getVectorElementCount()); 2406 unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP 2407 ? ISD::ZERO_EXTEND 2408 : ISD::SIGN_EXTEND; 2409 SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src); 2410 return DAG.getNode(Op.getOpcode(), DL, VT, Ext); 2411 } 2412 // FP2Int 2413 assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering"); 2414 // Do one doubling fp_extend then complete the operation by converting 2415 // to int. 2416 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); 2417 SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT); 2418 return DAG.getNode(Op.getOpcode(), DL, VT, FExt); 2419 } 2420 2421 // Narrowing conversions 2422 if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) { 2423 if (IsInt2FP) { 2424 // One narrowing int_to_fp, then an fp_round. 2425 assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering"); 2426 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount()); 2427 SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src); 2428 return DAG.getFPExtendOrRound(Int2FP, DL, VT); 2429 } 2430 // FP2Int 2431 // One narrowing fp_to_int, then truncate the integer. If the float isn't 2432 // representable by the integer, the result is poison. 2433 MVT IVecVT = 2434 MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2), 2435 VT.getVectorElementCount()); 2436 SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src); 2437 return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int); 2438 } 2439 2440 // Scalable vectors can exit here. Patterns will handle equally-sized 2441 // conversions halving/doubling ones. 2442 if (!VT.isFixedLengthVector()) 2443 return Op; 2444 2445 // For fixed-length vectors we lower to a custom "VL" node. 
2446 unsigned RVVOpc = 0; 2447 switch (Op.getOpcode()) { 2448 default: 2449 llvm_unreachable("Impossible opcode"); 2450 case ISD::FP_TO_SINT: 2451 RVVOpc = RISCVISD::FP_TO_SINT_VL; 2452 break; 2453 case ISD::FP_TO_UINT: 2454 RVVOpc = RISCVISD::FP_TO_UINT_VL; 2455 break; 2456 case ISD::SINT_TO_FP: 2457 RVVOpc = RISCVISD::SINT_TO_FP_VL; 2458 break; 2459 case ISD::UINT_TO_FP: 2460 RVVOpc = RISCVISD::UINT_TO_FP_VL; 2461 break; 2462 } 2463 2464 MVT ContainerVT, SrcContainerVT; 2465 // Derive the reference container type from the larger vector type. 2466 if (SrcEltSize > EltSize) { 2467 SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 2468 ContainerVT = 2469 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 2470 } else { 2471 ContainerVT = getContainerForFixedLengthVector(VT); 2472 SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); 2473 } 2474 2475 SDValue Mask, VL; 2476 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2477 2478 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 2479 Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); 2480 return convertFromScalableVector(VT, Src, DAG, Subtarget); 2481 } 2482 case ISD::VECREDUCE_ADD: 2483 case ISD::VECREDUCE_UMAX: 2484 case ISD::VECREDUCE_SMAX: 2485 case ISD::VECREDUCE_UMIN: 2486 case ISD::VECREDUCE_SMIN: 2487 return lowerVECREDUCE(Op, DAG); 2488 case ISD::VECREDUCE_AND: 2489 case ISD::VECREDUCE_OR: 2490 case ISD::VECREDUCE_XOR: 2491 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1) 2492 return lowerVectorMaskVECREDUCE(Op, DAG); 2493 return lowerVECREDUCE(Op, DAG); 2494 case ISD::VECREDUCE_FADD: 2495 case ISD::VECREDUCE_SEQ_FADD: 2496 case ISD::VECREDUCE_FMIN: 2497 case ISD::VECREDUCE_FMAX: 2498 return lowerFPVECREDUCE(Op, DAG); 2499 case ISD::INSERT_SUBVECTOR: 2500 return lowerINSERT_SUBVECTOR(Op, DAG); 2501 case ISD::EXTRACT_SUBVECTOR: 2502 return lowerEXTRACT_SUBVECTOR(Op, DAG); 2503 case ISD::STEP_VECTOR: 2504 return lowerSTEP_VECTOR(Op, DAG); 2505 case ISD::VECTOR_REVERSE: 2506 return lowerVECTOR_REVERSE(Op, DAG); 2507 case ISD::BUILD_VECTOR: 2508 return lowerBUILD_VECTOR(Op, DAG, Subtarget); 2509 case ISD::SPLAT_VECTOR: 2510 if (Op.getValueType().getVectorElementType() == MVT::i1) 2511 return lowerVectorMaskSplat(Op, DAG); 2512 return lowerSPLAT_VECTOR(Op, DAG, Subtarget); 2513 case ISD::VECTOR_SHUFFLE: 2514 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 2515 case ISD::CONCAT_VECTORS: { 2516 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is 2517 // better than going through the stack, as the default expansion does. 
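// For example, (v8i32 concat_vectors v4i32:a, v4i32:b) becomes insert_subvector (insert_subvector undef, a, 0), b, 4.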
2518 SDLoc DL(Op); 2519 MVT VT = Op.getSimpleValueType(); 2520 unsigned NumOpElts = 2521 Op.getOperand(0).getSimpleValueType().getVectorMinNumElements(); 2522 SDValue Vec = DAG.getUNDEF(VT); 2523 for (const auto &OpIdx : enumerate(Op->ops())) 2524 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), 2525 DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); 2526 return Vec; 2527 } 2528 case ISD::LOAD: 2529 if (auto V = expandUnalignedRVVLoad(Op, DAG)) 2530 return V; 2531 if (Op.getValueType().isFixedLengthVector()) 2532 return lowerFixedLengthVectorLoadToRVV(Op, DAG); 2533 return Op; 2534 case ISD::STORE: 2535 if (auto V = expandUnalignedRVVStore(Op, DAG)) 2536 return V; 2537 if (Op.getOperand(1).getValueType().isFixedLengthVector()) 2538 return lowerFixedLengthVectorStoreToRVV(Op, DAG); 2539 return Op; 2540 case ISD::MLOAD: 2541 return lowerMLOAD(Op, DAG); 2542 case ISD::MSTORE: 2543 return lowerMSTORE(Op, DAG); 2544 case ISD::SETCC: 2545 return lowerFixedLengthVectorSetccToRVV(Op, DAG); 2546 case ISD::ADD: 2547 return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); 2548 case ISD::SUB: 2549 return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); 2550 case ISD::MUL: 2551 return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); 2552 case ISD::MULHS: 2553 return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); 2554 case ISD::MULHU: 2555 return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); 2556 case ISD::AND: 2557 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, 2558 RISCVISD::AND_VL); 2559 case ISD::OR: 2560 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, 2561 RISCVISD::OR_VL); 2562 case ISD::XOR: 2563 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, 2564 RISCVISD::XOR_VL); 2565 case ISD::SDIV: 2566 return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); 2567 case ISD::SREM: 2568 return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); 2569 case ISD::UDIV: 2570 return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); 2571 case ISD::UREM: 2572 return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); 2573 case ISD::SHL: 2574 case ISD::SRA: 2575 case ISD::SRL: 2576 if (Op.getSimpleValueType().isFixedLengthVector()) 2577 return lowerFixedLengthVectorShiftToRVV(Op, DAG); 2578 // This can be called for an i32 shift amount that needs to be promoted. 
2579 assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() && 2580 "Unexpected custom legalisation"); 2581 return SDValue(); 2582 case ISD::SADDSAT: 2583 return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL); 2584 case ISD::UADDSAT: 2585 return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL); 2586 case ISD::SSUBSAT: 2587 return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL); 2588 case ISD::USUBSAT: 2589 return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL); 2590 case ISD::FADD: 2591 return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); 2592 case ISD::FSUB: 2593 return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); 2594 case ISD::FMUL: 2595 return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); 2596 case ISD::FDIV: 2597 return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); 2598 case ISD::FNEG: 2599 return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); 2600 case ISD::FABS: 2601 return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); 2602 case ISD::FSQRT: 2603 return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); 2604 case ISD::FMA: 2605 return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); 2606 case ISD::SMIN: 2607 return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); 2608 case ISD::SMAX: 2609 return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); 2610 case ISD::UMIN: 2611 return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); 2612 case ISD::UMAX: 2613 return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); 2614 case ISD::FMINNUM: 2615 return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL); 2616 case ISD::FMAXNUM: 2617 return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL); 2618 case ISD::ABS: 2619 return lowerABS(Op, DAG); 2620 case ISD::VSELECT: 2621 return lowerFixedLengthVectorSelectToRVV(Op, DAG); 2622 case ISD::FCOPYSIGN: 2623 return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG); 2624 case ISD::MGATHER: 2625 return lowerMGATHER(Op, DAG); 2626 case ISD::MSCATTER: 2627 return lowerMSCATTER(Op, DAG); 2628 case ISD::FLT_ROUNDS_: 2629 return lowerGET_ROUNDING(Op, DAG); 2630 case ISD::SET_ROUNDING: 2631 return lowerSET_ROUNDING(Op, DAG); 2632 case ISD::VP_ADD: 2633 return lowerVPOp(Op, DAG, RISCVISD::ADD_VL); 2634 case ISD::VP_SUB: 2635 return lowerVPOp(Op, DAG, RISCVISD::SUB_VL); 2636 case ISD::VP_MUL: 2637 return lowerVPOp(Op, DAG, RISCVISD::MUL_VL); 2638 case ISD::VP_SDIV: 2639 return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL); 2640 case ISD::VP_UDIV: 2641 return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL); 2642 case ISD::VP_SREM: 2643 return lowerVPOp(Op, DAG, RISCVISD::SREM_VL); 2644 case ISD::VP_UREM: 2645 return lowerVPOp(Op, DAG, RISCVISD::UREM_VL); 2646 case ISD::VP_AND: 2647 return lowerVPOp(Op, DAG, RISCVISD::AND_VL); 2648 case ISD::VP_OR: 2649 return lowerVPOp(Op, DAG, RISCVISD::OR_VL); 2650 case ISD::VP_XOR: 2651 return lowerVPOp(Op, DAG, RISCVISD::XOR_VL); 2652 case ISD::VP_ASHR: 2653 return lowerVPOp(Op, DAG, RISCVISD::SRA_VL); 2654 case ISD::VP_LSHR: 2655 return lowerVPOp(Op, DAG, RISCVISD::SRL_VL); 2656 case ISD::VP_SHL: 2657 return lowerVPOp(Op, DAG, RISCVISD::SHL_VL); 2658 case ISD::VP_FADD: 2659 return lowerVPOp(Op, DAG, RISCVISD::FADD_VL); 2660 case ISD::VP_FSUB: 2661 return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL); 2662 case ISD::VP_FMUL: 2663 return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL); 2664 case ISD::VP_FDIV: 2665 return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL); 2666 } 2667 } 2668 2669 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, 2670 SelectionDAG &DAG, unsigned Flags) { 2671 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, 
Flags); 2672 } 2673 2674 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, 2675 SelectionDAG &DAG, unsigned Flags) { 2676 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), 2677 Flags); 2678 } 2679 2680 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, 2681 SelectionDAG &DAG, unsigned Flags) { 2682 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), 2683 N->getOffset(), Flags); 2684 } 2685 2686 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, 2687 SelectionDAG &DAG, unsigned Flags) { 2688 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); 2689 } 2690 2691 template <class NodeTy> 2692 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, 2693 bool IsLocal) const { 2694 SDLoc DL(N); 2695 EVT Ty = getPointerTy(DAG.getDataLayout()); 2696 2697 if (isPositionIndependent()) { 2698 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 2699 if (IsLocal) 2700 // Use PC-relative addressing to access the symbol. This generates the 2701 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) 2702 // %pcrel_lo(auipc)). 2703 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 2704 2705 // Use PC-relative addressing to access the GOT for this symbol, then load 2706 // the address from the GOT. This generates the pattern (PseudoLA sym), 2707 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). 2708 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); 2709 } 2710 2711 switch (getTargetMachine().getCodeModel()) { 2712 default: 2713 report_fatal_error("Unsupported code model for lowering"); 2714 case CodeModel::Small: { 2715 // Generate a sequence for accessing addresses within the first 2 GiB of 2716 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). 2717 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); 2718 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); 2719 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 2720 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); 2721 } 2722 case CodeModel::Medium: { 2723 // Generate a sequence for accessing addresses within any 2GiB range within 2724 // the address space. This generates the pattern (PseudoLLA sym), which 2725 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). 2726 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 2727 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 2728 } 2729 } 2730 } 2731 2732 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, 2733 SelectionDAG &DAG) const { 2734 SDLoc DL(Op); 2735 EVT Ty = Op.getValueType(); 2736 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 2737 int64_t Offset = N->getOffset(); 2738 MVT XLenVT = Subtarget.getXLenVT(); 2739 2740 const GlobalValue *GV = N->getGlobal(); 2741 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 2742 SDValue Addr = getAddr(N, DAG, IsLocal); 2743 2744 // In order to maximise the opportunity for common subexpression elimination, 2745 // emit a separate ADD node for the global address offset instead of folding 2746 // it in the global address node. Later peephole optimisations may choose to 2747 // fold it back in when profitable. 
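// For example, accesses to @g+4 and @g+8 can then share a single materialised address for @g.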
2748 if (Offset != 0) 2749 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 2750 DAG.getConstant(Offset, DL, XLenVT)); 2751 return Addr; 2752 } 2753 2754 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, 2755 SelectionDAG &DAG) const { 2756 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); 2757 2758 return getAddr(N, DAG); 2759 } 2760 2761 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, 2762 SelectionDAG &DAG) const { 2763 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); 2764 2765 return getAddr(N, DAG); 2766 } 2767 2768 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, 2769 SelectionDAG &DAG) const { 2770 JumpTableSDNode *N = cast<JumpTableSDNode>(Op); 2771 2772 return getAddr(N, DAG); 2773 } 2774 2775 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, 2776 SelectionDAG &DAG, 2777 bool UseGOT) const { 2778 SDLoc DL(N); 2779 EVT Ty = getPointerTy(DAG.getDataLayout()); 2780 const GlobalValue *GV = N->getGlobal(); 2781 MVT XLenVT = Subtarget.getXLenVT(); 2782 2783 if (UseGOT) { 2784 // Use PC-relative addressing to access the GOT for this TLS symbol, then 2785 // load the address from the GOT and add the thread pointer. This generates 2786 // the pattern (PseudoLA_TLS_IE sym), which expands to 2787 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). 2788 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 2789 SDValue Load = 2790 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); 2791 2792 // Add the thread pointer. 2793 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 2794 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); 2795 } 2796 2797 // Generate a sequence for accessing the address relative to the thread 2798 // pointer, with the appropriate adjustment for the thread pointer offset. 2799 // This generates the pattern 2800 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) 2801 SDValue AddrHi = 2802 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); 2803 SDValue AddrAdd = 2804 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); 2805 SDValue AddrLo = 2806 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); 2807 2808 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 2809 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 2810 SDValue MNAdd = SDValue( 2811 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), 2812 0); 2813 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); 2814 } 2815 2816 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, 2817 SelectionDAG &DAG) const { 2818 SDLoc DL(N); 2819 EVT Ty = getPointerTy(DAG.getDataLayout()); 2820 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); 2821 const GlobalValue *GV = N->getGlobal(); 2822 2823 // Use a PC-relative addressing mode to access the global dynamic GOT address. 2824 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to 2825 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). 2826 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 2827 SDValue Load = 2828 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); 2829 2830 // Prepare argument list to generate call. 2831 ArgListTy Args; 2832 ArgListEntry Entry; 2833 Entry.Node = Load; 2834 Entry.Ty = CallTy; 2835 Args.push_back(Entry); 2836 2837 // Setup call to __tls_get_addr. 
2838 TargetLowering::CallLoweringInfo CLI(DAG);
2839 CLI.setDebugLoc(DL)
2840 .setChain(DAG.getEntryNode())
2841 .setLibCallee(CallingConv::C, CallTy,
2842 DAG.getExternalSymbol("__tls_get_addr", Ty),
2843 std::move(Args));
2844
2845 return LowerCallTo(CLI).first;
2846 }
2847
2848 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2849 SelectionDAG &DAG) const {
2850 SDLoc DL(Op);
2851 EVT Ty = Op.getValueType();
2852 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2853 int64_t Offset = N->getOffset();
2854 MVT XLenVT = Subtarget.getXLenVT();
2855
2856 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2857
2858 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2859 CallingConv::GHC)
2860 report_fatal_error("In GHC calling convention TLS is not supported");
2861
2862 SDValue Addr;
2863 switch (Model) {
2864 case TLSModel::LocalExec:
2865 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2866 break;
2867 case TLSModel::InitialExec:
2868 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2869 break;
2870 case TLSModel::LocalDynamic:
2871 case TLSModel::GeneralDynamic:
2872 Addr = getDynamicTLSAddr(N, DAG);
2873 break;
2874 }
2875
2876 // In order to maximise the opportunity for common subexpression elimination,
2877 // emit a separate ADD node for the global address offset instead of folding
2878 // it in the global address node. Later peephole optimisations may choose to
2879 // fold it back in when profitable.
2880 if (Offset != 0)
2881 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2882 DAG.getConstant(Offset, DL, XLenVT));
2883 return Addr;
2884 }
2885
2886 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2887 SDValue CondV = Op.getOperand(0);
2888 SDValue TrueV = Op.getOperand(1);
2889 SDValue FalseV = Op.getOperand(2);
2890 SDLoc DL(Op);
2891 MVT VT = Op.getSimpleValueType();
2892 MVT XLenVT = Subtarget.getXLenVT();
2893
2894 // Lower vector SELECTs to VSELECTs by splatting the condition.
2895 if (VT.isVector()) {
2896 MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2897 SDValue CondSplat = VT.isScalableVector()
2898 ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2899 : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2900 return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2901 }
2902
2903 // If the result type is XLenVT and CondV is the output of a SETCC node
2904 // which also operated on XLenVT inputs, then merge the SETCC node into the
2905 // lowered RISCVISD::SELECT_CC to take advantage of the integer
2906 // compare+branch instructions. i.e.:
2907 // (select (setcc lhs, rhs, cc), truev, falsev)
2908 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2909 if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2910 CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2911 SDValue LHS = CondV.getOperand(0);
2912 SDValue RHS = CondV.getOperand(1);
2913 const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2914 ISD::CondCode CCVal = CC->get();
2915
2916 // Special case for a select of 2 constants that have a difference of 1.
2917 // Normally this is done by DAGCombine, but if the select is introduced by
2918 // type legalization or op legalization, we miss it. Restricting to SETLT
2919 // case for now because that is what signed saturating add/sub need.
2920 // FIXME: We don't need the condition to be SETLT or even a SETCC,
2921 // but we would probably want to swap the true/false values if the condition
2922 // is SETGE/SETLE to avoid an XORI.
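// Illustrative example (not from the original source): since the setcc result
// is 0 or 1, (select (setlt x, y), 5, 4) can be emitted as
// (add (setlt x, y), 4), and (select (setlt x, y), 4, 5) as
// (sub 5, (setlt x, y)), avoiding a conditional sequence entirely.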
2923 if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) && 2924 CCVal == ISD::SETLT) { 2925 const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue(); 2926 const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue(); 2927 if (TrueVal - 1 == FalseVal) 2928 return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV); 2929 if (TrueVal + 1 == FalseVal) 2930 return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV); 2931 } 2932 2933 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 2934 2935 SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT); 2936 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; 2937 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 2938 } 2939 2940 // Otherwise: 2941 // (select condv, truev, falsev) 2942 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) 2943 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 2944 SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT); 2945 2946 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; 2947 2948 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 2949 } 2950 2951 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 2952 SDValue CondV = Op.getOperand(1); 2953 SDLoc DL(Op); 2954 MVT XLenVT = Subtarget.getXLenVT(); 2955 2956 if (CondV.getOpcode() == ISD::SETCC && 2957 CondV.getOperand(0).getValueType() == XLenVT) { 2958 SDValue LHS = CondV.getOperand(0); 2959 SDValue RHS = CondV.getOperand(1); 2960 ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get(); 2961 2962 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 2963 2964 SDValue TargetCC = DAG.getCondCode(CCVal); 2965 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 2966 LHS, RHS, TargetCC, Op.getOperand(2)); 2967 } 2968 2969 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0), 2970 CondV, DAG.getConstant(0, DL, XLenVT), 2971 DAG.getCondCode(ISD::SETNE), Op.getOperand(2)); 2972 } 2973 2974 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2975 MachineFunction &MF = DAG.getMachineFunction(); 2976 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); 2977 2978 SDLoc DL(Op); 2979 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2980 getPointerTy(MF.getDataLayout())); 2981 2982 // vastart just stores the address of the VarArgsFrameIndex slot into the 2983 // memory location argument. 
2984 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2985 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1), 2986 MachinePointerInfo(SV)); 2987 } 2988 2989 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, 2990 SelectionDAG &DAG) const { 2991 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 2992 MachineFunction &MF = DAG.getMachineFunction(); 2993 MachineFrameInfo &MFI = MF.getFrameInfo(); 2994 MFI.setFrameAddressIsTaken(true); 2995 Register FrameReg = RI.getFrameRegister(MF); 2996 int XLenInBytes = Subtarget.getXLen() / 8; 2997 2998 EVT VT = Op.getValueType(); 2999 SDLoc DL(Op); 3000 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); 3001 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3002 while (Depth--) { 3003 int Offset = -(XLenInBytes * 2); 3004 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, 3005 DAG.getIntPtrConstant(Offset, DL)); 3006 FrameAddr = 3007 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 3008 } 3009 return FrameAddr; 3010 } 3011 3012 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, 3013 SelectionDAG &DAG) const { 3014 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 3015 MachineFunction &MF = DAG.getMachineFunction(); 3016 MachineFrameInfo &MFI = MF.getFrameInfo(); 3017 MFI.setReturnAddressIsTaken(true); 3018 MVT XLenVT = Subtarget.getXLenVT(); 3019 int XLenInBytes = Subtarget.getXLen() / 8; 3020 3021 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 3022 return SDValue(); 3023 3024 EVT VT = Op.getValueType(); 3025 SDLoc DL(Op); 3026 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3027 if (Depth) { 3028 int Off = -XLenInBytes; 3029 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); 3030 SDValue Offset = DAG.getConstant(Off, DL, VT); 3031 return DAG.getLoad(VT, DL, DAG.getEntryNode(), 3032 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), 3033 MachinePointerInfo()); 3034 } 3035 3036 // Return the value of the return address register, marking it an implicit 3037 // live-in. 
3038 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT)); 3039 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); 3040 } 3041 3042 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op, 3043 SelectionDAG &DAG) const { 3044 SDLoc DL(Op); 3045 SDValue Lo = Op.getOperand(0); 3046 SDValue Hi = Op.getOperand(1); 3047 SDValue Shamt = Op.getOperand(2); 3048 EVT VT = Lo.getValueType(); 3049 3050 // if Shamt-XLEN < 0: // Shamt < XLEN 3051 // Lo = Lo << Shamt 3052 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt)) 3053 // else: 3054 // Lo = 0 3055 // Hi = Lo << (Shamt-XLEN) 3056 3057 SDValue Zero = DAG.getConstant(0, DL, VT); 3058 SDValue One = DAG.getConstant(1, DL, VT); 3059 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 3060 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 3061 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 3062 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 3063 3064 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); 3065 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One); 3066 SDValue ShiftRightLo = 3067 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt); 3068 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt); 3069 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo); 3070 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen); 3071 3072 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 3073 3074 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero); 3075 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 3076 3077 SDValue Parts[2] = {Lo, Hi}; 3078 return DAG.getMergeValues(Parts, DL); 3079 } 3080 3081 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, 3082 bool IsSRA) const { 3083 SDLoc DL(Op); 3084 SDValue Lo = Op.getOperand(0); 3085 SDValue Hi = Op.getOperand(1); 3086 SDValue Shamt = Op.getOperand(2); 3087 EVT VT = Lo.getValueType(); 3088 3089 // SRA expansion: 3090 // if Shamt-XLEN < 0: // Shamt < XLEN 3091 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 3092 // Hi = Hi >>s Shamt 3093 // else: 3094 // Lo = Hi >>s (Shamt-XLEN); 3095 // Hi = Hi >>s (XLEN-1) 3096 // 3097 // SRL expansion: 3098 // if Shamt-XLEN < 0: // Shamt < XLEN 3099 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 3100 // Hi = Hi >>u Shamt 3101 // else: 3102 // Lo = Hi >>u (Shamt-XLEN); 3103 // Hi = 0; 3104 3105 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; 3106 3107 SDValue Zero = DAG.getConstant(0, DL, VT); 3108 SDValue One = DAG.getConstant(1, DL, VT); 3109 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 3110 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 3111 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 3112 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 3113 3114 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt); 3115 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One); 3116 SDValue ShiftLeftHi = 3117 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt); 3118 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi); 3119 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); 3120 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen); 3121 SDValue HiFalse = 3122 IsSRA ? 
DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero; 3123 3124 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 3125 3126 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse); 3127 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 3128 3129 SDValue Parts[2] = {Lo, Hi}; 3130 return DAG.getMergeValues(Parts, DL); 3131 } 3132 3133 // Lower splats of i1 types to SETCC. For each mask vector type, we have a 3134 // legal equivalently-sized i8 type, so we can use that as a go-between. 3135 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op, 3136 SelectionDAG &DAG) const { 3137 SDLoc DL(Op); 3138 MVT VT = Op.getSimpleValueType(); 3139 SDValue SplatVal = Op.getOperand(0); 3140 // All-zeros or all-ones splats are handled specially. 3141 if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) { 3142 SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second; 3143 return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL); 3144 } 3145 if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) { 3146 SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second; 3147 return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL); 3148 } 3149 MVT XLenVT = Subtarget.getXLenVT(); 3150 assert(SplatVal.getValueType() == XLenVT && 3151 "Unexpected type for i1 splat value"); 3152 MVT InterVT = VT.changeVectorElementType(MVT::i8); 3153 SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal, 3154 DAG.getConstant(1, DL, XLenVT)); 3155 SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal); 3156 SDValue Zero = DAG.getConstant(0, DL, InterVT); 3157 return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE); 3158 } 3159 3160 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is 3161 // illegal (currently only vXi64 RV32). 3162 // FIXME: We could also catch non-constant sign-extended i32 values and lower 3163 // them to SPLAT_VECTOR_I64 3164 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op, 3165 SelectionDAG &DAG) const { 3166 SDLoc DL(Op); 3167 MVT VecVT = Op.getSimpleValueType(); 3168 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && 3169 "Unexpected SPLAT_VECTOR_PARTS lowering"); 3170 3171 assert(Op.getNumOperands() == 2 && "Unexpected number of operands!"); 3172 SDValue Lo = Op.getOperand(0); 3173 SDValue Hi = Op.getOperand(1); 3174 3175 if (VecVT.isFixedLengthVector()) { 3176 MVT ContainerVT = getContainerForFixedLengthVector(VecVT); 3177 SDLoc DL(Op); 3178 SDValue Mask, VL; 3179 std::tie(Mask, VL) = 3180 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3181 3182 SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG); 3183 return convertFromScalableVector(VecVT, Res, DAG, Subtarget); 3184 } 3185 3186 if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) { 3187 int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue(); 3188 int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue(); 3189 // If Hi constant is all the same sign bit as Lo, lower this as a custom 3190 // node in order to try and match RVV vector/scalar instructions. 3191 if ((LoC >> 31) == HiC) 3192 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 3193 } 3194 3195 // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended. 3196 if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo && 3197 isa<ConstantSDNode>(Hi.getOperand(1)) && 3198 Hi.getConstantOperandVal(1) == 31) 3199 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 3200 3201 // Fall back to use a stack store and stride x0 vector load. 
Use X0 as VL. 3202 return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi, 3203 DAG.getRegister(RISCV::X0, MVT::i64)); 3204 } 3205 3206 // Custom-lower extensions from mask vectors by using a vselect either with 1 3207 // for zero/any-extension or -1 for sign-extension: 3208 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0) 3209 // Note that any-extension is lowered identically to zero-extension. 3210 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG, 3211 int64_t ExtTrueVal) const { 3212 SDLoc DL(Op); 3213 MVT VecVT = Op.getSimpleValueType(); 3214 SDValue Src = Op.getOperand(0); 3215 // Only custom-lower extensions from mask types 3216 assert(Src.getValueType().isVector() && 3217 Src.getValueType().getVectorElementType() == MVT::i1); 3218 3219 MVT XLenVT = Subtarget.getXLenVT(); 3220 SDValue SplatZero = DAG.getConstant(0, DL, XLenVT); 3221 SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT); 3222 3223 if (VecVT.isScalableVector()) { 3224 // Be careful not to introduce illegal scalar types at this stage, and be 3225 // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is 3226 // illegal and must be expanded. Since we know that the constants are 3227 // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly. 3228 bool IsRV32E64 = 3229 !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64; 3230 3231 if (!IsRV32E64) { 3232 SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); 3233 SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); 3234 } else { 3235 SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero); 3236 SplatTrueVal = 3237 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal); 3238 } 3239 3240 return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); 3241 } 3242 3243 MVT ContainerVT = getContainerForFixedLengthVector(VecVT); 3244 MVT I1ContainerVT = 3245 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3246 3247 SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget); 3248 3249 SDValue Mask, VL; 3250 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3251 3252 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); 3253 SplatTrueVal = 3254 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); 3255 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, 3256 SplatTrueVal, SplatZero, VL); 3257 3258 return convertFromScalableVector(VecVT, Select, DAG, Subtarget); 3259 } 3260 3261 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV( 3262 SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const { 3263 MVT ExtVT = Op.getSimpleValueType(); 3264 // Only custom-lower extensions from fixed-length vector types. 3265 if (!ExtVT.isFixedLengthVector()) 3266 return Op; 3267 MVT VT = Op.getOperand(0).getSimpleValueType(); 3268 // Grab the canonical container type for the extended type. Infer the smaller 3269 // type from that to ensure the same number of vector elements, as we know 3270 // the LMUL will be sufficient to hold the smaller type. 3271 MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT); 3272 // Get the extended container type manually to ensure the same number of 3273 // vector elements between source and dest. 
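// For instance (illustrative only; the container depends on the configured
// RVV vector width): if v8i16 = sext v8i8 is given the extended container
// nxvXi16, the source container is built as nxvXi8 with the same element
// count X rather than taking v8i8's own container.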
3274 MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 3275 ContainerExtVT.getVectorElementCount()); 3276 3277 SDValue Op1 = 3278 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 3279 3280 SDLoc DL(Op); 3281 SDValue Mask, VL; 3282 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 3283 3284 SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL); 3285 3286 return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget); 3287 } 3288 3289 // Custom-lower truncations from vectors to mask vectors by using a mask and a 3290 // setcc operation: 3291 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne) 3292 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op, 3293 SelectionDAG &DAG) const { 3294 SDLoc DL(Op); 3295 EVT MaskVT = Op.getValueType(); 3296 // Only expect to custom-lower truncations to mask types 3297 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 && 3298 "Unexpected type for vector mask lowering"); 3299 SDValue Src = Op.getOperand(0); 3300 MVT VecVT = Src.getSimpleValueType(); 3301 3302 // If this is a fixed vector, we need to convert it to a scalable vector. 3303 MVT ContainerVT = VecVT; 3304 if (VecVT.isFixedLengthVector()) { 3305 ContainerVT = getContainerForFixedLengthVector(VecVT); 3306 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 3307 } 3308 3309 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); 3310 SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 3311 3312 SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); 3313 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); 3314 3315 if (VecVT.isScalableVector()) { 3316 SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); 3317 return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE); 3318 } 3319 3320 SDValue Mask, VL; 3321 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3322 3323 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); 3324 SDValue Trunc = 3325 DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL); 3326 Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero, 3327 DAG.getCondCode(ISD::SETNE), Mask, VL); 3328 return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget); 3329 } 3330 3331 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the 3332 // first position of a vector, and that vector is slid up to the insert index. 3333 // By limiting the active vector length to index+1 and merging with the 3334 // original vector (with an undisturbed tail policy for elements >= VL), we 3335 // achieve the desired result of leaving all elements untouched except the one 3336 // at VL-1, which is replaced with the desired value. 3337 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 3338 SelectionDAG &DAG) const { 3339 SDLoc DL(Op); 3340 MVT VecVT = Op.getSimpleValueType(); 3341 SDValue Vec = Op.getOperand(0); 3342 SDValue Val = Op.getOperand(1); 3343 SDValue Idx = Op.getOperand(2); 3344 3345 if (VecVT.getVectorElementType() == MVT::i1) { 3346 // FIXME: For now we just promote to an i8 vector and insert into that, 3347 // but this is probably not optimal. 
3348 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 3349 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 3350 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx); 3351 return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec); 3352 } 3353 3354 MVT ContainerVT = VecVT; 3355 // If the operand is a fixed-length vector, convert to a scalable one. 3356 if (VecVT.isFixedLengthVector()) { 3357 ContainerVT = getContainerForFixedLengthVector(VecVT); 3358 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3359 } 3360 3361 MVT XLenVT = Subtarget.getXLenVT(); 3362 3363 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 3364 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64; 3365 // Even i64-element vectors on RV32 can be lowered without scalar 3366 // legalization if the most-significant 32 bits of the value are not affected 3367 // by the sign-extension of the lower 32 bits. 3368 // TODO: We could also catch sign extensions of a 32-bit value. 3369 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) { 3370 const auto *CVal = cast<ConstantSDNode>(Val); 3371 if (isInt<32>(CVal->getSExtValue())) { 3372 IsLegalInsert = true; 3373 Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); 3374 } 3375 } 3376 3377 SDValue Mask, VL; 3378 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3379 3380 SDValue ValInVec; 3381 3382 if (IsLegalInsert) { 3383 unsigned Opc = 3384 VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL; 3385 if (isNullConstant(Idx)) { 3386 Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL); 3387 if (!VecVT.isFixedLengthVector()) 3388 return Vec; 3389 return convertFromScalableVector(VecVT, Vec, DAG, Subtarget); 3390 } 3391 ValInVec = 3392 DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL); 3393 } else { 3394 // On RV32, i64-element vectors must be specially handled to place the 3395 // value at element 0, by using two vslide1up instructions in sequence on 3396 // the i32 split lo/hi value. Use an equivalently-sized i32 vector for 3397 // this. 3398 SDValue One = DAG.getConstant(1, DL, XLenVT); 3399 SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero); 3400 SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One); 3401 MVT I32ContainerVT = 3402 MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2); 3403 SDValue I32Mask = 3404 getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first; 3405 // Limit the active VL to two. 3406 SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT); 3407 // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied 3408 // undef doesn't obey the earlyclobber constraint. Just splat a zero value. 3409 ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero, 3410 InsertI64VL); 3411 // First slide in the hi value, then the lo in underneath it. 3412 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 3413 ValHi, I32Mask, InsertI64VL); 3414 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec, 3415 ValLo, I32Mask, InsertI64VL); 3416 // Bitcast back to the right container type. 3417 ValInVec = DAG.getBitcast(ContainerVT, ValInVec); 3418 } 3419 3420 // Now that the value is in a vector, slide it into position. 
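// Illustrative example (not from the original source): to insert at index 3,
// VL is set to 4 and the vslideup uses offset 3, so elements 0..2 stay below
// the slide offset, elements >= VL are preserved by the tail-undisturbed
// policy, and only element 3 receives the new value.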
3421 SDValue InsertVL = 3422 DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT)); 3423 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 3424 ValInVec, Idx, Mask, InsertVL); 3425 if (!VecVT.isFixedLengthVector()) 3426 return Slideup; 3427 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 3428 } 3429 3430 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then 3431 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer 3432 // types this is done using VMV_X_S to allow us to glean information about the 3433 // sign bits of the result. 3434 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 3435 SelectionDAG &DAG) const { 3436 SDLoc DL(Op); 3437 SDValue Idx = Op.getOperand(1); 3438 SDValue Vec = Op.getOperand(0); 3439 EVT EltVT = Op.getValueType(); 3440 MVT VecVT = Vec.getSimpleValueType(); 3441 MVT XLenVT = Subtarget.getXLenVT(); 3442 3443 if (VecVT.getVectorElementType() == MVT::i1) { 3444 // FIXME: For now we just promote to an i8 vector and extract from that, 3445 // but this is probably not optimal. 3446 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 3447 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 3448 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx); 3449 } 3450 3451 // If this is a fixed vector, we need to convert it to a scalable vector. 3452 MVT ContainerVT = VecVT; 3453 if (VecVT.isFixedLengthVector()) { 3454 ContainerVT = getContainerForFixedLengthVector(VecVT); 3455 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3456 } 3457 3458 // If the index is 0, the vector is already in the right position. 3459 if (!isNullConstant(Idx)) { 3460 // Use a VL of 1 to avoid processing more elements than we need. 3461 SDValue VL = DAG.getConstant(1, DL, XLenVT); 3462 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 3463 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3464 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 3465 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 3466 } 3467 3468 if (!EltVT.isInteger()) { 3469 // Floating-point extracts are handled in TableGen. 3470 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, 3471 DAG.getConstant(0, DL, XLenVT)); 3472 } 3473 3474 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 3475 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); 3476 } 3477 3478 // Some RVV intrinsics may claim that they want an integer operand to be 3479 // promoted or expanded. 3480 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG, 3481 const RISCVSubtarget &Subtarget) { 3482 assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3483 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) && 3484 "Unexpected opcode"); 3485 3486 if (!Subtarget.hasStdExtV()) 3487 return SDValue(); 3488 3489 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN; 3490 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 
1 : 0);
3491 SDLoc DL(Op);
3492
3493 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3494 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3495 if (!II || !II->SplatOperand)
3496 return SDValue();
3497
3498 unsigned SplatOp = II->SplatOperand + HasChain;
3499 assert(SplatOp < Op.getNumOperands());
3500
3501 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3502 SDValue &ScalarOp = Operands[SplatOp];
3503 MVT OpVT = ScalarOp.getSimpleValueType();
3504 MVT XLenVT = Subtarget.getXLenVT();
3505
3506 // If this isn't a scalar, or its type is XLenVT we're done.
3507 if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3508 return SDValue();
3509
3510 // Simplest case is that the operand needs to be promoted to XLenVT.
3511 if (OpVT.bitsLT(XLenVT)) {
3512 // If the operand is a constant, sign extend to increase our chances
3513 // of being able to use a .vi instruction. ANY_EXTEND would become a
3514 // zero extend and the simm5 check in isel would fail.
3515 // FIXME: Should we ignore the upper bits in isel instead?
3516 unsigned ExtOpc =
3517 isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3518 ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3519 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3520 }
3521
3522 // Use the previous operand to get the vXi64 VT. The result might be a mask
3523 // VT for compares. Using the previous operand assumes that the previous
3524 // operand will never have a smaller element size than a scalar operand and
3525 // that a widening operation never uses SEW=64.
3526 // NOTE: If this fails the below assert, we can probably just find the
3527 // element count from any operand or result and use it to construct the VT.
3528 assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3529 MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3530
3531 // The more complex case is when the scalar is larger than XLenVT.
3532 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3533 VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3534
3535 // If this is a sign-extended 32-bit constant, we can truncate it and rely
3536 // on the instruction to sign-extend since SEW>XLEN.
3537 if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3538 if (isInt<32>(CVal->getSExtValue())) {
3539 ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3540 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3541 }
3542 }
3543
3544 // We need to convert the scalar to a splat vector.
3545 // FIXME: Can we implicitly truncate the scalar if it is known to
3546 // be sign extended?
3547 // VL should be the last operand.
3548 SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3549 assert(VL.getValueType() == XLenVT);
3550 ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3551 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3552 }
3553
3554 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3555 SelectionDAG &DAG) const {
3556 unsigned IntNo = Op.getConstantOperandVal(0);
3557 SDLoc DL(Op);
3558 MVT XLenVT = Subtarget.getXLenVT();
3559
3560 switch (IntNo) {
3561 default:
3562 break; // Don't custom lower most intrinsics.
3563 case Intrinsic::thread_pointer: {
3564 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3565 return DAG.getRegister(RISCV::X4, PtrVT);
3566 }
3567 case Intrinsic::riscv_orc_b:
3568 // Lower to the GORCI encoding for orc.b.
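// (orc.b sets each byte of the result to 0xff if any bit of the corresponding
// source byte is set and to 0x00 otherwise, which corresponds to the GORCI
// operation with a shift amount of 7.)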
3569 return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3570 DAG.getConstant(7, DL, XLenVT));
3571 case Intrinsic::riscv_grev:
3572 case Intrinsic::riscv_gorc: {
3573 unsigned Opc =
3574 IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3575 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3576 }
3577 case Intrinsic::riscv_shfl:
3578 case Intrinsic::riscv_unshfl: {
3579 unsigned Opc =
3580 IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3581 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3582 }
3583 case Intrinsic::riscv_bcompress:
3584 case Intrinsic::riscv_bdecompress: {
3585 unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3586 : RISCVISD::BDECOMPRESS;
3587 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3588 }
3589 case Intrinsic::riscv_vmv_x_s:
3590 assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3591 return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3592 Op.getOperand(1));
3593 case Intrinsic::riscv_vmv_v_x:
3594 return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3595 Op.getSimpleValueType(), DL, DAG, Subtarget);
3596 case Intrinsic::riscv_vfmv_v_f:
3597 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3598 Op.getOperand(1), Op.getOperand(2));
3599 case Intrinsic::riscv_vmv_s_x: {
3600 SDValue Scalar = Op.getOperand(2);
3601
3602 if (Scalar.getValueType().bitsLE(XLenVT)) {
3603 Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3604 return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3605 Op.getOperand(1), Scalar, Op.getOperand(3));
3606 }
3607
3608 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3609
3610 // This is an i64 value that lives in two scalar registers. We have to
3611 // insert this in a convoluted way. First we build a vXi64 splat containing
3612 // the two values that we assemble using some bit math. Next we'll use
3613 // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3614 // to merge element 0 from our splat into the source vector.
3615 // FIXME: This is probably not the best way to do this, but it is
3616 // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3617 // point.
3618 // sw lo, (a0)
3619 // sw hi, 4(a0)
3620 // vlse vX, (a0)
3621 //
3622 // vid.v vVid
3623 // vmseq.vx mMask, vVid, 0
3624 // vmerge.vvm vDest, vSrc, vVal, mMask
3625 MVT VT = Op.getSimpleValueType();
3626 SDValue Vec = Op.getOperand(1);
3627 SDValue VL = Op.getOperand(3);
3628
3629 SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3630 SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3631 DAG.getConstant(0, DL, MVT::i32), VL);
3632
3633 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3634 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3635 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3636 SDValue SelectCond =
3637 DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3638 DAG.getCondCode(ISD::SETEQ), Mask, VL);
3639 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3640 Vec, VL);
3641 }
3642 case Intrinsic::riscv_vslide1up:
3643 case Intrinsic::riscv_vslide1down:
3644 case Intrinsic::riscv_vslide1up_mask:
3645 case Intrinsic::riscv_vslide1down_mask: {
3646 // We need to special case these when the scalar is larger than XLen.
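// Illustrative summary (restating the code below): on RV32 an i64 scalar is
// split into two i32 halves, the source vector is reinterpreted with SEW=32
// and twice the VL, and the halves are shifted in with two slide1up (hi then
// lo) or slide1down (lo then hi) steps before bitcasting back to SEW=64.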
3647 unsigned NumOps = Op.getNumOperands(); 3648 bool IsMasked = NumOps == 6; 3649 unsigned OpOffset = IsMasked ? 1 : 0; 3650 SDValue Scalar = Op.getOperand(2 + OpOffset); 3651 if (Scalar.getValueType().bitsLE(XLenVT)) 3652 break; 3653 3654 // Splatting a sign extended constant is fine. 3655 if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) 3656 if (isInt<32>(CVal->getSExtValue())) 3657 break; 3658 3659 MVT VT = Op.getSimpleValueType(); 3660 assert(VT.getVectorElementType() == MVT::i64 && 3661 Scalar.getValueType() == MVT::i64 && "Unexpected VTs"); 3662 3663 // Convert the vector source to the equivalent nxvXi32 vector. 3664 MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2); 3665 SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset)); 3666 3667 SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 3668 DAG.getConstant(0, DL, XLenVT)); 3669 SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar, 3670 DAG.getConstant(1, DL, XLenVT)); 3671 3672 // Double the VL since we halved SEW. 3673 SDValue VL = Op.getOperand(NumOps - 1); 3674 SDValue I32VL = 3675 DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT)); 3676 3677 MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount()); 3678 SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL); 3679 3680 // Shift the two scalar parts in using SEW=32 slide1up/slide1down 3681 // instructions. 3682 if (IntNo == Intrinsic::riscv_vslide1up || 3683 IntNo == Intrinsic::riscv_vslide1up_mask) { 3684 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi, 3685 I32Mask, I32VL); 3686 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo, 3687 I32Mask, I32VL); 3688 } else { 3689 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo, 3690 I32Mask, I32VL); 3691 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi, 3692 I32Mask, I32VL); 3693 } 3694 3695 // Convert back to nxvXi64. 3696 Vec = DAG.getBitcast(VT, Vec); 3697 3698 if (!IsMasked) 3699 return Vec; 3700 3701 // Apply mask after the operation. 
3702 SDValue Mask = Op.getOperand(NumOps - 2); 3703 SDValue MaskedOff = Op.getOperand(1); 3704 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL); 3705 } 3706 } 3707 3708 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 3709 } 3710 3711 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 3712 SelectionDAG &DAG) const { 3713 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget); 3714 } 3715 3716 static MVT getLMUL1VT(MVT VT) { 3717 assert(VT.getVectorElementType().getSizeInBits() <= 64 && 3718 "Unexpected vector MVT"); 3719 return MVT::getScalableVectorVT( 3720 VT.getVectorElementType(), 3721 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); 3722 } 3723 3724 static unsigned getRVVReductionOp(unsigned ISDOpcode) { 3725 switch (ISDOpcode) { 3726 default: 3727 llvm_unreachable("Unhandled reduction"); 3728 case ISD::VECREDUCE_ADD: 3729 return RISCVISD::VECREDUCE_ADD_VL; 3730 case ISD::VECREDUCE_UMAX: 3731 return RISCVISD::VECREDUCE_UMAX_VL; 3732 case ISD::VECREDUCE_SMAX: 3733 return RISCVISD::VECREDUCE_SMAX_VL; 3734 case ISD::VECREDUCE_UMIN: 3735 return RISCVISD::VECREDUCE_UMIN_VL; 3736 case ISD::VECREDUCE_SMIN: 3737 return RISCVISD::VECREDUCE_SMIN_VL; 3738 case ISD::VECREDUCE_AND: 3739 return RISCVISD::VECREDUCE_AND_VL; 3740 case ISD::VECREDUCE_OR: 3741 return RISCVISD::VECREDUCE_OR_VL; 3742 case ISD::VECREDUCE_XOR: 3743 return RISCVISD::VECREDUCE_XOR_VL; 3744 } 3745 } 3746 3747 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op, 3748 SelectionDAG &DAG) const { 3749 SDLoc DL(Op); 3750 SDValue Vec = Op.getOperand(0); 3751 MVT VecVT = Vec.getSimpleValueType(); 3752 assert((Op.getOpcode() == ISD::VECREDUCE_AND || 3753 Op.getOpcode() == ISD::VECREDUCE_OR || 3754 Op.getOpcode() == ISD::VECREDUCE_XOR) && 3755 "Unexpected reduction lowering"); 3756 3757 MVT XLenVT = Subtarget.getXLenVT(); 3758 assert(Op.getValueType() == XLenVT && 3759 "Expected reduction output to be legalized to XLenVT"); 3760 3761 MVT ContainerVT = VecVT; 3762 if (VecVT.isFixedLengthVector()) { 3763 ContainerVT = getContainerForFixedLengthVector(VecVT); 3764 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3765 } 3766 3767 SDValue Mask, VL; 3768 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3769 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 3770 3771 switch (Op.getOpcode()) { 3772 default: 3773 llvm_unreachable("Unhandled reduction"); 3774 case ISD::VECREDUCE_AND: 3775 // vpopc ~x == 0 3776 Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL); 3777 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3778 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ); 3779 case ISD::VECREDUCE_OR: 3780 // vpopc x != 0 3781 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3782 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE); 3783 case ISD::VECREDUCE_XOR: { 3784 // ((vpopc x) & 1) != 0 3785 SDValue One = DAG.getConstant(1, DL, XLenVT); 3786 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL); 3787 Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One); 3788 return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE); 3789 } 3790 } 3791 } 3792 3793 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, 3794 SelectionDAG &DAG) const { 3795 SDLoc DL(Op); 3796 SDValue Vec = Op.getOperand(0); 3797 EVT VecEVT = Vec.getValueType(); 3798 3799 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode()); 3800 3801 // Due to ordering in legalize types we may have a 
vector type that needs to 3802 // be split. Do that manually so we can get down to a legal type. 3803 while (getTypeAction(*DAG.getContext(), VecEVT) == 3804 TargetLowering::TypeSplitVector) { 3805 SDValue Lo, Hi; 3806 std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL); 3807 VecEVT = Lo.getValueType(); 3808 Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi); 3809 } 3810 3811 // TODO: The type may need to be widened rather than split. Or widened before 3812 // it can be split. 3813 if (!isTypeLegal(VecEVT)) 3814 return SDValue(); 3815 3816 MVT VecVT = VecEVT.getSimpleVT(); 3817 MVT VecEltVT = VecVT.getVectorElementType(); 3818 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode()); 3819 3820 MVT ContainerVT = VecVT; 3821 if (VecVT.isFixedLengthVector()) { 3822 ContainerVT = getContainerForFixedLengthVector(VecVT); 3823 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3824 } 3825 3826 MVT M1VT = getLMUL1VT(ContainerVT); 3827 3828 SDValue Mask, VL; 3829 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3830 3831 // FIXME: This is a VLMAX splat which might be too large and can prevent 3832 // vsetvli removal. 3833 SDValue NeutralElem = 3834 DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags()); 3835 SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem); 3836 SDValue Reduction = 3837 DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL); 3838 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 3839 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 3840 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 3841 } 3842 3843 // Given a reduction op, this function returns the matching reduction opcode, 3844 // the vector SDValue and the scalar SDValue required to lower this to a 3845 // RISCVISD node. 
3846 static std::tuple<unsigned, SDValue, SDValue> 3847 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { 3848 SDLoc DL(Op); 3849 auto Flags = Op->getFlags(); 3850 unsigned Opcode = Op.getOpcode(); 3851 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode); 3852 switch (Opcode) { 3853 default: 3854 llvm_unreachable("Unhandled reduction"); 3855 case ISD::VECREDUCE_FADD: 3856 return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), 3857 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); 3858 case ISD::VECREDUCE_SEQ_FADD: 3859 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1), 3860 Op.getOperand(0)); 3861 case ISD::VECREDUCE_FMIN: 3862 return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0), 3863 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); 3864 case ISD::VECREDUCE_FMAX: 3865 return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0), 3866 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); 3867 } 3868 } 3869 3870 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, 3871 SelectionDAG &DAG) const { 3872 SDLoc DL(Op); 3873 MVT VecEltVT = Op.getSimpleValueType(); 3874 3875 unsigned RVVOpcode; 3876 SDValue VectorVal, ScalarVal; 3877 std::tie(RVVOpcode, VectorVal, ScalarVal) = 3878 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); 3879 MVT VecVT = VectorVal.getSimpleValueType(); 3880 3881 MVT ContainerVT = VecVT; 3882 if (VecVT.isFixedLengthVector()) { 3883 ContainerVT = getContainerForFixedLengthVector(VecVT); 3884 VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget); 3885 } 3886 3887 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); 3888 3889 SDValue Mask, VL; 3890 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 3891 3892 // FIXME: This is a VLMAX splat which might be too large and can prevent 3893 // vsetvli removal. 3894 SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal); 3895 SDValue Reduction = 3896 DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL); 3897 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 3898 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 3899 } 3900 3901 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, 3902 SelectionDAG &DAG) const { 3903 SDValue Vec = Op.getOperand(0); 3904 SDValue SubVec = Op.getOperand(1); 3905 MVT VecVT = Vec.getSimpleValueType(); 3906 MVT SubVecVT = SubVec.getSimpleValueType(); 3907 3908 SDLoc DL(Op); 3909 MVT XLenVT = Subtarget.getXLenVT(); 3910 unsigned OrigIdx = Op.getConstantOperandVal(2); 3911 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 3912 3913 // We don't have the ability to slide mask vectors up indexed by their i1 3914 // elements; the smallest we can do is i8. Often we are able to bitcast to 3915 // equivalent i8 vectors. Note that when inserting a fixed-length vector 3916 // into a scalable one, we might not necessarily have enough scalable 3917 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid. 
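// Illustrative example (not from the original source): inserting nxv8i1 into
// nxv64i1 at index 16 can instead be handled as inserting nxv1i8 into nxv8i8
// at index 2, since both element counts and the index are divisible by 8.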
3918 if (SubVecVT.getVectorElementType() == MVT::i1 && 3919 (OrigIdx != 0 || !Vec.isUndef())) { 3920 if (VecVT.getVectorMinNumElements() >= 8 && 3921 SubVecVT.getVectorMinNumElements() >= 8) { 3922 assert(OrigIdx % 8 == 0 && "Invalid index"); 3923 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 3924 SubVecVT.getVectorMinNumElements() % 8 == 0 && 3925 "Unexpected mask vector lowering"); 3926 OrigIdx /= 8; 3927 SubVecVT = 3928 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 3929 SubVecVT.isScalableVector()); 3930 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 3931 VecVT.isScalableVector()); 3932 Vec = DAG.getBitcast(VecVT, Vec); 3933 SubVec = DAG.getBitcast(SubVecVT, SubVec); 3934 } else { 3935 // We can't slide this mask vector up indexed by its i1 elements. 3936 // This poses a problem when we wish to insert a scalable vector which 3937 // can't be re-expressed as a larger type. Just choose the slow path and 3938 // extend to a larger type, then truncate back down. 3939 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); 3940 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); 3941 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); 3942 SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec); 3943 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec, 3944 Op.getOperand(2)); 3945 SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT); 3946 return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE); 3947 } 3948 } 3949 3950 // If the subvector vector is a fixed-length type, we cannot use subregister 3951 // manipulation to simplify the codegen; we don't know which register of a 3952 // LMUL group contains the specific subvector as we only know the minimum 3953 // register size. Therefore we must slide the vector group up the full 3954 // amount. 3955 if (SubVecVT.isFixedLengthVector()) { 3956 if (OrigIdx == 0 && Vec.isUndef()) 3957 return Op; 3958 MVT ContainerVT = VecVT; 3959 if (VecVT.isFixedLengthVector()) { 3960 ContainerVT = getContainerForFixedLengthVector(VecVT); 3961 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3962 } 3963 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, 3964 DAG.getUNDEF(ContainerVT), SubVec, 3965 DAG.getConstant(0, DL, XLenVT)); 3966 SDValue Mask = 3967 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 3968 // Set the vector length to only the number of elements we care about. Note 3969 // that for slideup this includes the offset. 3970 SDValue VL = 3971 DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); 3972 SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 3973 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 3974 SubVec, SlideupAmt, Mask, VL); 3975 if (VecVT.isFixedLengthVector()) 3976 Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 3977 return DAG.getBitcast(Op.getValueType(), Slideup); 3978 } 3979 3980 unsigned SubRegIdx, RemIdx; 3981 std::tie(SubRegIdx, RemIdx) = 3982 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 3983 VecVT, SubVecVT, OrigIdx, TRI); 3984 3985 RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); 3986 bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 || 3987 SubVecLMUL == RISCVII::VLMUL::LMUL_F4 || 3988 SubVecLMUL == RISCVII::VLMUL::LMUL_F8; 3989 3990 // 1. 
If the Idx has been completely eliminated and this subvector's size is 3991 // a vector register or a multiple thereof, or the surrounding elements are 3992 // undef, then this is a subvector insert which naturally aligns to a vector 3993 // register. These can easily be handled using subregister manipulation. 3994 // 2. If the subvector is smaller than a vector register, then the insertion 3995 // must preserve the undisturbed elements of the register. We do this by 3996 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type 3997 // (which resolves to a subregister copy), performing a VSLIDEUP to place the 3998 // subvector within the vector register, and an INSERT_SUBVECTOR of that 3999 // LMUL=1 type back into the larger vector (resolving to another subregister 4000 // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type 4001 // to avoid allocating a large register group to hold our subvector. 4002 if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) 4003 return Op; 4004 4005 // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements 4006 // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy 4007 // (in our case undisturbed). This means we can set up a subvector insertion 4008 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the 4009 // size of the subvector. 4010 MVT InterSubVT = VecVT; 4011 SDValue AlignedExtract = Vec; 4012 unsigned AlignedIdx = OrigIdx - RemIdx; 4013 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 4014 InterSubVT = getLMUL1VT(VecVT); 4015 // Extract a subvector equal to the nearest full vector register type. This 4016 // should resolve to a EXTRACT_SUBREG instruction. 4017 AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 4018 DAG.getConstant(AlignedIdx, DL, XLenVT)); 4019 } 4020 4021 SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); 4022 // For scalable vectors this must be further multiplied by vscale. 4023 SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); 4024 4025 SDValue Mask, VL; 4026 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 4027 4028 // Construct the vector length corresponding to RemIdx + length(SubVecVT). 4029 VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); 4030 VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); 4031 VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); 4032 4033 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, 4034 DAG.getUNDEF(InterSubVT), SubVec, 4035 DAG.getConstant(0, DL, XLenVT)); 4036 4037 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, 4038 AlignedExtract, SubVec, SlideupAmt, Mask, VL); 4039 4040 // If required, insert this subvector back into the correct vector register. 4041 // This should resolve to an INSERT_SUBREG instruction. 4042 if (VecVT.bitsGT(InterSubVT)) 4043 Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, 4044 DAG.getConstant(AlignedIdx, DL, XLenVT)); 4045 4046 // We might have bitcast from a mask type: cast back to the original type if 4047 // required. 
4048 return DAG.getBitcast(Op.getSimpleValueType(), Slideup); 4049 } 4050 4051 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, 4052 SelectionDAG &DAG) const { 4053 SDValue Vec = Op.getOperand(0); 4054 MVT SubVecVT = Op.getSimpleValueType(); 4055 MVT VecVT = Vec.getSimpleValueType(); 4056 4057 SDLoc DL(Op); 4058 MVT XLenVT = Subtarget.getXLenVT(); 4059 unsigned OrigIdx = Op.getConstantOperandVal(1); 4060 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 4061 4062 // We don't have the ability to slide mask vectors down indexed by their i1 4063 // elements; the smallest we can do is i8. Often we are able to bitcast to 4064 // equivalent i8 vectors. Note that when extracting a fixed-length vector 4065 // from a scalable one, we might not necessarily have enough scalable 4066 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. 4067 if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { 4068 if (VecVT.getVectorMinNumElements() >= 8 && 4069 SubVecVT.getVectorMinNumElements() >= 8) { 4070 assert(OrigIdx % 8 == 0 && "Invalid index"); 4071 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 4072 SubVecVT.getVectorMinNumElements() % 8 == 0 && 4073 "Unexpected mask vector lowering"); 4074 OrigIdx /= 8; 4075 SubVecVT = 4076 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 4077 SubVecVT.isScalableVector()); 4078 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 4079 VecVT.isScalableVector()); 4080 Vec = DAG.getBitcast(VecVT, Vec); 4081 } else { 4082 // We can't slide this mask vector down, indexed by its i1 elements. 4083 // This poses a problem when we wish to extract a scalable vector which 4084 // can't be re-expressed as a larger type. Just choose the slow path and 4085 // extend to a larger type, then truncate back down. 4086 // TODO: We could probably improve this when extracting certain fixed 4087 // from fixed, where we can extract as i8 and shift the correct element 4088 // right to reach the desired subvector? 4089 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); 4090 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); 4091 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); 4092 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec, 4093 Op.getOperand(1)); 4094 SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT); 4095 return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE); 4096 } 4097 } 4098 4099 // If the subvector vector is a fixed-length type, we cannot use subregister 4100 // manipulation to simplify the codegen; we don't know which register of a 4101 // LMUL group contains the specific subvector as we only know the minimum 4102 // register size. Therefore we must slide the vector group down the full 4103 // amount. 4104 if (SubVecVT.isFixedLengthVector()) { 4105 // With an index of 0 this is a cast-like subvector, which can be performed 4106 // with subregister operations. 4107 if (OrigIdx == 0) 4108 return Op; 4109 MVT ContainerVT = VecVT; 4110 if (VecVT.isFixedLengthVector()) { 4111 ContainerVT = getContainerForFixedLengthVector(VecVT); 4112 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 4113 } 4114 SDValue Mask = 4115 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 4116 // Set the vector length to only the number of elements we care about. This 4117 // avoids sliding down elements we're going to discard straight away. 
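// Illustrative example (not from the original source): extracting a v2i64
// subvector at index 2 uses VL=2 with a slidedown amount of 2, then takes a
// cast-like EXTRACT_SUBVECTOR at index 0 from the slid-down vector.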
4118 SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4119 SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4120 SDValue Slidedown =
4121 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4122 DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4123 // Now we can use a cast-like subvector extract to get the result.
4124 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4125 DAG.getConstant(0, DL, XLenVT));
4126 return DAG.getBitcast(Op.getValueType(), Slidedown);
4127 }
4128
4129 unsigned SubRegIdx, RemIdx;
4130 std::tie(SubRegIdx, RemIdx) =
4131 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4132 VecVT, SubVecVT, OrigIdx, TRI);
4133
4134 // If the Idx has been completely eliminated then this is a subvector extract
4135 // which naturally aligns to a vector register. These can easily be handled
4136 // using subregister manipulation.
4137 if (RemIdx == 0)
4138 return Op;
4139
4140 // Else we must shift our vector register directly to extract the subvector.
4141 // Do this using VSLIDEDOWN.
4142
4143 // If the vector type is an LMUL-group type, extract a subvector equal to the
4144 // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
4145 // instruction.
4146 MVT InterSubVT = VecVT;
4147 if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4148 InterSubVT = getLMUL1VT(VecVT);
4149 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4150 DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4151 }
4152
4153 // Slide this vector register down by the desired number of elements in order
4154 // to place the desired subvector starting at element 0.
4155 SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4156 // For scalable vectors this must be further multiplied by vscale.
4157 SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4158
4159 SDValue Mask, VL;
4160 std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4161 SDValue Slidedown =
4162 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4163 DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4164
4165 // Now the vector is in the right position, extract our final subvector. This
4166 // should resolve to a COPY.
4167 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4168 DAG.getConstant(0, DL, XLenVT));
4169
4170 // We might have bitcast from a mask type: cast back to the original type if
4171 // required.
4172 return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4173 }
4174
4175 // Lower step_vector to the vid instruction. Any non-identity step value must
4176 // be accounted for by manual expansion.
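// Illustrative examples (not from the original source): a step of 4 becomes
// (shl (vid), splat(2)) using the log2 of the step, while a step of 3 needs
// the general (mul (vid), splat(3)) form.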
4177 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op, 4178 SelectionDAG &DAG) const { 4179 SDLoc DL(Op); 4180 MVT VT = Op.getSimpleValueType(); 4181 MVT XLenVT = Subtarget.getXLenVT(); 4182 SDValue Mask, VL; 4183 std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget); 4184 SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL); 4185 uint64_t StepValImm = Op.getConstantOperandVal(0); 4186 if (StepValImm != 1) { 4187 if (isPowerOf2_64(StepValImm)) { 4188 SDValue StepVal = 4189 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, 4190 DAG.getConstant(Log2_64(StepValImm), DL, XLenVT)); 4191 StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal); 4192 } else { 4193 SDValue StepVal = lowerScalarSplat( 4194 DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT, 4195 DL, DAG, Subtarget); 4196 StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal); 4197 } 4198 } 4199 return StepVec; 4200 } 4201 4202 // Implement vector_reverse using vrgather.vv with indices determined by 4203 // subtracting the id of each element from (VLMAX-1). This will convert 4204 // the indices like so: 4205 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0). 4206 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16. 4207 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op, 4208 SelectionDAG &DAG) const { 4209 SDLoc DL(Op); 4210 MVT VecVT = Op.getSimpleValueType(); 4211 unsigned EltSize = VecVT.getScalarSizeInBits(); 4212 unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue(); 4213 4214 unsigned MaxVLMAX = 0; 4215 unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits(); 4216 if (VectorBitsMax != 0) 4217 MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock; 4218 4219 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL; 4220 MVT IntVT = VecVT.changeVectorElementTypeToInteger(); 4221 4222 // If this is SEW=8 and VLMAX is unknown or more than 256, we need 4223 // to use vrgatherei16.vv. 4224 // TODO: It's also possible to use vrgatherei16.vv for other types to 4225 // decrease register width for the index calculation. 4226 if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) { 4227 // If this is LMUL=8, we have to split before can use vrgatherei16.vv. 4228 // Reverse each half, then reassemble them in reverse order. 4229 // NOTE: It's also possible that after splitting that VLMAX no longer 4230 // requires vrgatherei16.vv. 4231 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) { 4232 SDValue Lo, Hi; 4233 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0); 4234 EVT LoVT, HiVT; 4235 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT); 4236 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo); 4237 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi); 4238 // Reassemble the low and high pieces reversed. 4239 // FIXME: This is a CONCAT_VECTORS. 4240 SDValue Res = 4241 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi, 4242 DAG.getIntPtrConstant(0, DL)); 4243 return DAG.getNode( 4244 ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo, 4245 DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL)); 4246 } 4247 4248 // Just promote the int type to i16 which will double the LMUL. 4249 IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount()); 4250 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL; 4251 } 4252 4253 MVT XLenVT = Subtarget.getXLenVT(); 4254 SDValue Mask, VL; 4255 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 4256 4257 // Calculate VLMAX-1 for the desired SEW. 
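// For a scalable type with a minimum of N elements, VLMAX is (vscale x N);
// e.g. for nxv4i32 the indices computed below are (VLMAX-1) - vid =
// (VLMAX-1, VLMAX-2, ..., 1, 0), where VLMAX = vscale * 4.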
4258 unsigned MinElts = VecVT.getVectorMinNumElements(); 4259 SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT, 4260 DAG.getConstant(MinElts, DL, XLenVT)); 4261 SDValue VLMinus1 = 4262 DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT)); 4263 4264 // Splat VLMAX-1 taking care to handle SEW==64 on RV32. 4265 bool IsRV32E64 = 4266 !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64; 4267 SDValue SplatVL; 4268 if (!IsRV32E64) 4269 SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1); 4270 else 4271 SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1); 4272 4273 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL); 4274 SDValue Indices = 4275 DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL); 4276 4277 return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL); 4278 } 4279 4280 SDValue 4281 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, 4282 SelectionDAG &DAG) const { 4283 SDLoc DL(Op); 4284 auto *Load = cast<LoadSDNode>(Op); 4285 4286 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 4287 Load->getMemoryVT(), 4288 *Load->getMemOperand()) && 4289 "Expecting a correctly-aligned load"); 4290 4291 MVT VT = Op.getSimpleValueType(); 4292 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4293 4294 SDValue VL = 4295 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 4296 4297 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 4298 SDValue NewLoad = DAG.getMemIntrinsicNode( 4299 RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, 4300 Load->getMemoryVT(), Load->getMemOperand()); 4301 4302 SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); 4303 return DAG.getMergeValues({Result, Load->getChain()}, DL); 4304 } 4305 4306 SDValue 4307 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, 4308 SelectionDAG &DAG) const { 4309 SDLoc DL(Op); 4310 auto *Store = cast<StoreSDNode>(Op); 4311 4312 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(), 4313 Store->getMemoryVT(), 4314 *Store->getMemOperand()) && 4315 "Expecting a correctly-aligned store"); 4316 4317 SDValue StoreVal = Store->getValue(); 4318 MVT VT = StoreVal.getSimpleValueType(); 4319 4320 // If the size less than a byte, we need to pad with zeros to make a byte. 
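// For example, a v4i1 store value is inserted at element 0 of a zeroed v8i1
// vector so that a whole byte, with the unused upper bits cleared, is written.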
4321 if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) { 4322 VT = MVT::v8i1; 4323 StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 4324 DAG.getConstant(0, DL, VT), StoreVal, 4325 DAG.getIntPtrConstant(0, DL)); 4326 } 4327 4328 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4329 4330 SDValue VL = 4331 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 4332 4333 SDValue NewValue = 4334 convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget); 4335 return DAG.getMemIntrinsicNode( 4336 RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), 4337 {Store->getChain(), NewValue, Store->getBasePtr(), VL}, 4338 Store->getMemoryVT(), Store->getMemOperand()); 4339 } 4340 4341 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const { 4342 auto *Load = cast<MaskedLoadSDNode>(Op); 4343 4344 SDLoc DL(Op); 4345 MVT VT = Op.getSimpleValueType(); 4346 MVT XLenVT = Subtarget.getXLenVT(); 4347 4348 SDValue Mask = Load->getMask(); 4349 SDValue PassThru = Load->getPassThru(); 4350 SDValue VL; 4351 4352 MVT ContainerVT = VT; 4353 if (VT.isFixedLengthVector()) { 4354 ContainerVT = getContainerForFixedLengthVector(VT); 4355 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4356 4357 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4358 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 4359 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4360 } else 4361 VL = DAG.getRegister(RISCV::X0, XLenVT); 4362 4363 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 4364 SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT); 4365 SDValue Ops[] = {Load->getChain(), IntID, PassThru, 4366 Load->getBasePtr(), Mask, VL}; 4367 SDValue Result = 4368 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 4369 Load->getMemoryVT(), Load->getMemOperand()); 4370 SDValue Chain = Result.getValue(1); 4371 4372 if (VT.isFixedLengthVector()) 4373 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 4374 4375 return DAG.getMergeValues({Result, Chain}, DL); 4376 } 4377 4378 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const { 4379 auto *Store = cast<MaskedStoreSDNode>(Op); 4380 4381 SDLoc DL(Op); 4382 SDValue Val = Store->getValue(); 4383 SDValue Mask = Store->getMask(); 4384 MVT VT = Val.getSimpleValueType(); 4385 MVT XLenVT = Subtarget.getXLenVT(); 4386 SDValue VL; 4387 4388 MVT ContainerVT = VT; 4389 if (VT.isFixedLengthVector()) { 4390 ContainerVT = getContainerForFixedLengthVector(VT); 4391 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4392 4393 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 4394 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4395 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4396 } else 4397 VL = DAG.getRegister(RISCV::X0, XLenVT); 4398 4399 SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT); 4400 return DAG.getMemIntrinsicNode( 4401 ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), 4402 {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL}, 4403 Store->getMemoryVT(), Store->getMemOperand()); 4404 } 4405 4406 SDValue 4407 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, 4408 SelectionDAG &DAG) const { 4409 MVT InVT = Op.getOperand(0).getSimpleValueType(); 4410 MVT ContainerVT = getContainerForFixedLengthVector(InVT); 4411 4412 MVT VT = Op.getSimpleValueType(); 4413 
4414 SDValue Op1 = 4415 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 4416 SDValue Op2 = 4417 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 4418 4419 SDLoc DL(Op); 4420 SDValue VL = 4421 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 4422 4423 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4424 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 4425 4426 SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, 4427 Op.getOperand(2), Mask, VL); 4428 4429 return convertFromScalableVector(VT, Cmp, DAG, Subtarget); 4430 } 4431 4432 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( 4433 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { 4434 MVT VT = Op.getSimpleValueType(); 4435 4436 if (VT.getVectorElementType() == MVT::i1) 4437 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); 4438 4439 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); 4440 } 4441 4442 SDValue 4443 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op, 4444 SelectionDAG &DAG) const { 4445 unsigned Opc; 4446 switch (Op.getOpcode()) { 4447 default: llvm_unreachable("Unexpected opcode!"); 4448 case ISD::SHL: Opc = RISCVISD::SHL_VL; break; 4449 case ISD::SRA: Opc = RISCVISD::SRA_VL; break; 4450 case ISD::SRL: Opc = RISCVISD::SRL_VL; break; 4451 } 4452 4453 return lowerToScalableOp(Op, DAG, Opc); 4454 } 4455 4456 // Lower vector ABS to smax(X, sub(0, X)). 4457 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const { 4458 SDLoc DL(Op); 4459 MVT VT = Op.getSimpleValueType(); 4460 SDValue X = Op.getOperand(0); 4461 4462 assert(VT.isFixedLengthVector() && "Unexpected type"); 4463 4464 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4465 X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); 4466 4467 SDValue Mask, VL; 4468 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 4469 4470 SDValue SplatZero = 4471 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 4472 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 4473 SDValue NegX = 4474 DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL); 4475 SDValue Max = 4476 DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL); 4477 4478 return convertFromScalableVector(VT, Max, DAG, Subtarget); 4479 } 4480 4481 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV( 4482 SDValue Op, SelectionDAG &DAG) const { 4483 SDLoc DL(Op); 4484 MVT VT = Op.getSimpleValueType(); 4485 SDValue Mag = Op.getOperand(0); 4486 SDValue Sign = Op.getOperand(1); 4487 assert(Mag.getValueType() == Sign.getValueType() && 4488 "Can only handle COPYSIGN with matching types."); 4489 4490 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4491 Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget); 4492 Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget); 4493 4494 SDValue Mask, VL; 4495 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 4496 4497 SDValue CopySign = 4498 DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL); 4499 4500 return convertFromScalableVector(VT, CopySign, DAG, Subtarget); 4501 } 4502 4503 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( 4504 SDValue Op, SelectionDAG &DAG) const { 4505 MVT VT = Op.getSimpleValueType(); 4506 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4507 4508 MVT I1ContainerVT = 4509 
MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4510 4511 SDValue CC = 4512 convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); 4513 SDValue Op1 = 4514 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 4515 SDValue Op2 = 4516 convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); 4517 4518 SDLoc DL(Op); 4519 SDValue Mask, VL; 4520 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 4521 4522 SDValue Select = 4523 DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); 4524 4525 return convertFromScalableVector(VT, Select, DAG, Subtarget); 4526 } 4527 4528 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, 4529 unsigned NewOpc, 4530 bool HasMask) const { 4531 MVT VT = Op.getSimpleValueType(); 4532 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4533 4534 // Create list of operands by converting existing ones to scalable types. 4535 SmallVector<SDValue, 6> Ops; 4536 for (const SDValue &V : Op->op_values()) { 4537 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 4538 4539 // Pass through non-vector operands. 4540 if (!V.getValueType().isVector()) { 4541 Ops.push_back(V); 4542 continue; 4543 } 4544 4545 // "cast" fixed length vector to a scalable vector. 4546 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && 4547 "Only fixed length vectors are supported!"); 4548 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 4549 } 4550 4551 SDLoc DL(Op); 4552 SDValue Mask, VL; 4553 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 4554 if (HasMask) 4555 Ops.push_back(Mask); 4556 Ops.push_back(VL); 4557 4558 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); 4559 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); 4560 } 4561 4562 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node: 4563 // * Operands of each node are assumed to be in the same order. 4564 // * The EVL operand is promoted from i32 to i64 on RV64. 4565 // * Fixed-length vectors are converted to their scalable-vector container 4566 // types. 4567 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG, 4568 unsigned RISCVISDOpc) const { 4569 SDLoc DL(Op); 4570 MVT VT = Op.getSimpleValueType(); 4571 SmallVector<SDValue, 4> Ops; 4572 4573 for (const auto &OpIdx : enumerate(Op->ops())) { 4574 SDValue V = OpIdx.value(); 4575 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 4576 // Pass through operands which aren't fixed-length vectors. 4577 if (!V.getValueType().isFixedLengthVector()) { 4578 Ops.push_back(V); 4579 continue; 4580 } 4581 // "cast" fixed length vector to a scalable vector. 4582 MVT OpVT = V.getSimpleValueType(); 4583 MVT ContainerVT = getContainerForFixedLengthVector(OpVT); 4584 assert(useRVVForFixedLengthVectorVT(OpVT) && 4585 "Only fixed length vectors are supported!"); 4586 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 4587 } 4588 4589 if (!VT.isFixedLengthVector()) 4590 return DAG.getNode(RISCVISDOpc, DL, VT, Ops); 4591 4592 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4593 4594 SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops); 4595 4596 return convertFromScalableVector(VT, VPOp, DAG, Subtarget); 4597 } 4598 4599 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to 4600 // a RVV indexed load. 
The RVV indexed load instructions only support the 4601 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or 4602 // truncated to XLEN and are treated as byte offsets. Any signed or scaled 4603 // indexing is extended to the XLEN value type and scaled accordingly. 4604 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const { 4605 auto *MGN = cast<MaskedGatherSDNode>(Op.getNode()); 4606 SDLoc DL(Op); 4607 4608 SDValue Index = MGN->getIndex(); 4609 SDValue Mask = MGN->getMask(); 4610 SDValue PassThru = MGN->getPassThru(); 4611 4612 MVT VT = Op.getSimpleValueType(); 4613 MVT IndexVT = Index.getSimpleValueType(); 4614 MVT XLenVT = Subtarget.getXLenVT(); 4615 4616 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 4617 "Unexpected VTs!"); 4618 assert(MGN->getBasePtr().getSimpleValueType() == XLenVT && 4619 "Unexpected pointer type"); 4620 // Targets have to explicitly opt-in for extending vector loads. 4621 assert(MGN->getExtensionType() == ISD::NON_EXTLOAD && 4622 "Unexpected extending MGATHER"); 4623 4624 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 4625 // the selection of the masked intrinsics doesn't do this for us. 4626 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 4627 4628 SDValue VL; 4629 MVT ContainerVT = VT; 4630 if (VT.isFixedLengthVector()) { 4631 // We need to use the larger of the result and index type to determine the 4632 // scalable type to use so we don't increase LMUL for any operand/result. 4633 if (VT.bitsGE(IndexVT)) { 4634 ContainerVT = getContainerForFixedLengthVector(VT); 4635 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), 4636 ContainerVT.getVectorElementCount()); 4637 } else { 4638 IndexVT = getContainerForFixedLengthVector(IndexVT); 4639 ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(), 4640 IndexVT.getVectorElementCount()); 4641 } 4642 4643 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); 4644 4645 if (!IsUnmasked) { 4646 MVT MaskVT = 4647 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4648 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4649 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 4650 } 4651 4652 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4653 } else 4654 VL = DAG.getRegister(RISCV::X0, XLenVT); 4655 4656 unsigned IntID = 4657 IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask; 4658 SmallVector<SDValue, 8> Ops{MGN->getChain(), 4659 DAG.getTargetConstant(IntID, DL, XLenVT)}; 4660 if (!IsUnmasked) 4661 Ops.push_back(PassThru); 4662 Ops.push_back(MGN->getBasePtr()); 4663 Ops.push_back(Index); 4664 if (!IsUnmasked) 4665 Ops.push_back(Mask); 4666 Ops.push_back(VL); 4667 4668 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 4669 SDValue Result = 4670 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 4671 MGN->getMemoryVT(), MGN->getMemOperand()); 4672 SDValue Chain = Result.getValue(1); 4673 4674 if (VT.isFixedLengthVector()) 4675 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 4676 4677 return DAG.getMergeValues({Result, Chain}, DL); 4678 } 4679 4680 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to 4681 // a RVV indexed store. The RVV indexed store instructions only support the 4682 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or 4683 // truncated to XLEN and are treated as byte offsets. 
Any signed or scaled 4684 // indexing is extended to the XLEN value type and scaled accordingly. 4685 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op, 4686 SelectionDAG &DAG) const { 4687 auto *MSN = cast<MaskedScatterSDNode>(Op.getNode()); 4688 SDLoc DL(Op); 4689 SDValue Index = MSN->getIndex(); 4690 SDValue Mask = MSN->getMask(); 4691 SDValue Val = MSN->getValue(); 4692 4693 MVT VT = Val.getSimpleValueType(); 4694 MVT IndexVT = Index.getSimpleValueType(); 4695 MVT XLenVT = Subtarget.getXLenVT(); 4696 4697 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 4698 "Unexpected VTs!"); 4699 assert(MSN->getBasePtr().getSimpleValueType() == XLenVT && 4700 "Unexpected pointer type"); 4701 // Targets have to explicitly opt-in for extending vector loads and 4702 // truncating vector stores. 4703 assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER"); 4704 4705 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 4706 // the selection of the masked intrinsics doesn't do this for us. 4707 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 4708 4709 SDValue VL; 4710 if (VT.isFixedLengthVector()) { 4711 // We need to use the larger of the value and index type to determine the 4712 // scalable type to use so we don't increase LMUL for any operand/result. 4713 MVT ContainerVT; 4714 if (VT.bitsGE(IndexVT)) { 4715 ContainerVT = getContainerForFixedLengthVector(VT); 4716 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), 4717 ContainerVT.getVectorElementCount()); 4718 } else { 4719 IndexVT = getContainerForFixedLengthVector(IndexVT); 4720 ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 4721 IndexVT.getVectorElementCount()); 4722 } 4723 4724 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); 4725 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 4726 4727 if (!IsUnmasked) { 4728 MVT MaskVT = 4729 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 4730 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4731 } 4732 4733 VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4734 } else 4735 VL = DAG.getRegister(RISCV::X0, XLenVT); 4736 4737 unsigned IntID = 4738 IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask; 4739 SmallVector<SDValue, 8> Ops{MSN->getChain(), 4740 DAG.getTargetConstant(IntID, DL, XLenVT)}; 4741 Ops.push_back(Val); 4742 Ops.push_back(MSN->getBasePtr()); 4743 Ops.push_back(Index); 4744 if (!IsUnmasked) 4745 Ops.push_back(Mask); 4746 Ops.push_back(VL); 4747 4748 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops, 4749 MSN->getMemoryVT(), MSN->getMemOperand()); 4750 } 4751 4752 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op, 4753 SelectionDAG &DAG) const { 4754 const MVT XLenVT = Subtarget.getXLenVT(); 4755 SDLoc DL(Op); 4756 SDValue Chain = Op->getOperand(0); 4757 SDValue SysRegNo = DAG.getConstant( 4758 RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT); 4759 SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other); 4760 SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo); 4761 4762 // Encoding used for rounding mode in RISCV differs from that used in 4763 // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a 4764 // table, which consists of a sequence of 4-bit fields, each representing 4765 // corresponding FLT_ROUNDS mode. 
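// Worked example (using the architectural FRM encodings RNE=0, RTZ=1, RDN=2,
// RUP=3, RMM=4): if FRM currently reads RTZ (1), the shift amount computed
// below is 1*4 = 4, so bits [7:4] of the table are extracted; that field
// holds int(RoundingMode::TowardZero) = 0, which is the FLT_ROUNDS value for
// round-toward-zero.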
4766 static const int Table = 4767 (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) | 4768 (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) | 4769 (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) | 4770 (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) | 4771 (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM); 4772 4773 SDValue Shift = 4774 DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT)); 4775 SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT, 4776 DAG.getConstant(Table, DL, XLenVT), Shift); 4777 SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted, 4778 DAG.getConstant(7, DL, XLenVT)); 4779 4780 return DAG.getMergeValues({Masked, Chain}, DL); 4781 } 4782 4783 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op, 4784 SelectionDAG &DAG) const { 4785 const MVT XLenVT = Subtarget.getXLenVT(); 4786 SDLoc DL(Op); 4787 SDValue Chain = Op->getOperand(0); 4788 SDValue RMValue = Op->getOperand(1); 4789 SDValue SysRegNo = DAG.getConstant( 4790 RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT); 4791 4792 // Encoding used for rounding mode in RISCV differs from that used in 4793 // FLT_ROUNDS. To convert it the C rounding mode is used as an index in 4794 // a table, which consists of a sequence of 4-bit fields, each representing 4795 // corresponding RISCV mode. 4796 static const unsigned Table = 4797 (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) | 4798 (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) | 4799 (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) | 4800 (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) | 4801 (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway)); 4802 4803 SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue, 4804 DAG.getConstant(2, DL, XLenVT)); 4805 SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT, 4806 DAG.getConstant(Table, DL, XLenVT), Shift); 4807 RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted, 4808 DAG.getConstant(0x7, DL, XLenVT)); 4809 return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo, 4810 RMValue); 4811 } 4812 4813 // Returns the opcode of the target-specific SDNode that implements the 32-bit 4814 // form of the given Opcode. 4815 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { 4816 switch (Opcode) { 4817 default: 4818 llvm_unreachable("Unexpected opcode"); 4819 case ISD::SHL: 4820 return RISCVISD::SLLW; 4821 case ISD::SRA: 4822 return RISCVISD::SRAW; 4823 case ISD::SRL: 4824 return RISCVISD::SRLW; 4825 case ISD::SDIV: 4826 return RISCVISD::DIVW; 4827 case ISD::UDIV: 4828 return RISCVISD::DIVUW; 4829 case ISD::UREM: 4830 return RISCVISD::REMUW; 4831 case ISD::ROTL: 4832 return RISCVISD::ROLW; 4833 case ISD::ROTR: 4834 return RISCVISD::RORW; 4835 case RISCVISD::GREV: 4836 return RISCVISD::GREVW; 4837 case RISCVISD::GORC: 4838 return RISCVISD::GORCW; 4839 } 4840 } 4841 4842 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG 4843 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would 4844 // otherwise be promoted to i64, making it difficult to select the 4845 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of 4846 // type i8/i16/i32 is lost. 
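// For example, on RV64 an illegal i32 (srl x, y) would otherwise be promoted
// to a 64-bit shift; rewriting it here as
//   (trunc (RISCVISD::SRLW (any_extend x), (any_extend y)))
// keeps the "this was a 32-bit operation" fact around so that srlw can be
// selected.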
4847 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, 4848 unsigned ExtOpc = ISD::ANY_EXTEND) { 4849 SDLoc DL(N); 4850 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 4851 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); 4852 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); 4853 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 4854 // ReplaceNodeResults requires we maintain the same type for the return value. 4855 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); 4856 } 4857 4858 // Converts the given 32-bit operation to a i64 operation with signed extension 4859 // semantic to reduce the signed extension instructions. 4860 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { 4861 SDLoc DL(N); 4862 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 4863 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 4864 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); 4865 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 4866 DAG.getValueType(MVT::i32)); 4867 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 4868 } 4869 4870 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 4871 SmallVectorImpl<SDValue> &Results, 4872 SelectionDAG &DAG) const { 4873 SDLoc DL(N); 4874 switch (N->getOpcode()) { 4875 default: 4876 llvm_unreachable("Don't know how to custom type legalize this operation!"); 4877 case ISD::STRICT_FP_TO_SINT: 4878 case ISD::STRICT_FP_TO_UINT: 4879 case ISD::FP_TO_SINT: 4880 case ISD::FP_TO_UINT: { 4881 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4882 "Unexpected custom legalisation"); 4883 bool IsStrict = N->isStrictFPOpcode(); 4884 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT || 4885 N->getOpcode() == ISD::STRICT_FP_TO_SINT; 4886 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); 4887 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != 4888 TargetLowering::TypeSoftenFloat) { 4889 // FIXME: Support strict FP. 4890 if (IsStrict) 4891 return; 4892 if (!isTypeLegal(Op0.getValueType())) 4893 return; 4894 unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64; 4895 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0); 4896 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 4897 return; 4898 } 4899 // If the FP type needs to be softened, emit a library call using the 'si' 4900 // version. If we left it to default legalization we'd end up with 'di'. If 4901 // the FP type doesn't need to be softened just let generic type 4902 // legalization promote the result type. 4903 RTLIB::Libcall LC; 4904 if (IsSigned) 4905 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); 4906 else 4907 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); 4908 MakeLibCallOptions CallOptions; 4909 EVT OpVT = Op0.getValueType(); 4910 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); 4911 SDValue Chain = IsStrict ? 
N->getOperand(0) : SDValue(); 4912 SDValue Result; 4913 std::tie(Result, Chain) = 4914 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); 4915 Results.push_back(Result); 4916 if (IsStrict) 4917 Results.push_back(Chain); 4918 break; 4919 } 4920 case ISD::READCYCLECOUNTER: { 4921 assert(!Subtarget.is64Bit() && 4922 "READCYCLECOUNTER only has custom type legalization on riscv32"); 4923 4924 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 4925 SDValue RCW = 4926 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); 4927 4928 Results.push_back( 4929 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); 4930 Results.push_back(RCW.getValue(2)); 4931 break; 4932 } 4933 case ISD::MUL: { 4934 unsigned Size = N->getSimpleValueType(0).getSizeInBits(); 4935 unsigned XLen = Subtarget.getXLen(); 4936 // This multiply needs to be expanded, try to use MULHSU+MUL if possible. 4937 if (Size > XLen) { 4938 assert(Size == (XLen * 2) && "Unexpected custom legalisation"); 4939 SDValue LHS = N->getOperand(0); 4940 SDValue RHS = N->getOperand(1); 4941 APInt HighMask = APInt::getHighBitsSet(Size, XLen); 4942 4943 bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask); 4944 bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask); 4945 // We need exactly one side to be unsigned. 4946 if (LHSIsU == RHSIsU) 4947 return; 4948 4949 auto MakeMULPair = [&](SDValue S, SDValue U) { 4950 MVT XLenVT = Subtarget.getXLenVT(); 4951 S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S); 4952 U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U); 4953 SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U); 4954 SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U); 4955 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi); 4956 }; 4957 4958 bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen; 4959 bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen; 4960 4961 // The other operand should be signed, but still prefer MULH when 4962 // possible. 4963 if (RHSIsU && LHSIsS && !RHSIsS) 4964 Results.push_back(MakeMULPair(LHS, RHS)); 4965 else if (LHSIsU && RHSIsS && !LHSIsS) 4966 Results.push_back(MakeMULPair(RHS, LHS)); 4967 4968 return; 4969 } 4970 LLVM_FALLTHROUGH; 4971 } 4972 case ISD::ADD: 4973 case ISD::SUB: 4974 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4975 "Unexpected custom legalisation"); 4976 if (N->getOperand(1).getOpcode() == ISD::Constant) 4977 return; 4978 Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); 4979 break; 4980 case ISD::SHL: 4981 case ISD::SRA: 4982 case ISD::SRL: 4983 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4984 "Unexpected custom legalisation"); 4985 if (N->getOperand(1).getOpcode() == ISD::Constant) 4986 return; 4987 Results.push_back(customLegalizeToWOp(N, DAG)); 4988 break; 4989 case ISD::ROTL: 4990 case ISD::ROTR: 4991 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 4992 "Unexpected custom legalisation"); 4993 Results.push_back(customLegalizeToWOp(N, DAG)); 4994 break; 4995 case ISD::CTTZ: 4996 case ISD::CTTZ_ZERO_UNDEF: 4997 case ISD::CTLZ: 4998 case ISD::CTLZ_ZERO_UNDEF: { 4999 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5000 "Unexpected custom legalisation"); 5001 5002 SDValue NewOp0 = 5003 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 5004 bool IsCTZ = 5005 N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF; 5006 unsigned Opc = IsCTZ ? 
RISCVISD::CTZW : RISCVISD::CLZW; 5007 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0); 5008 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5009 return; 5010 } 5011 case ISD::SDIV: 5012 case ISD::UDIV: 5013 case ISD::UREM: { 5014 MVT VT = N->getSimpleValueType(0); 5015 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 5016 Subtarget.is64Bit() && Subtarget.hasStdExtM() && 5017 "Unexpected custom legalisation"); 5018 // Don't promote division/remainder by constant since we should expand those 5019 // to multiply by magic constant. 5020 // FIXME: What if the expansion is disabled for minsize. 5021 if (N->getOperand(1).getOpcode() == ISD::Constant) 5022 return; 5023 5024 // If the input is i32, use ANY_EXTEND since the W instructions don't read 5025 // the upper 32 bits. For other types we need to sign or zero extend 5026 // based on the opcode. 5027 unsigned ExtOpc = ISD::ANY_EXTEND; 5028 if (VT != MVT::i32) 5029 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND 5030 : ISD::ZERO_EXTEND; 5031 5032 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); 5033 break; 5034 } 5035 case ISD::UADDO: 5036 case ISD::USUBO: { 5037 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5038 "Unexpected custom legalisation"); 5039 bool IsAdd = N->getOpcode() == ISD::UADDO; 5040 // Create an ADDW or SUBW. 5041 SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 5042 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5043 SDValue Res = 5044 DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS); 5045 Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res, 5046 DAG.getValueType(MVT::i32)); 5047 5048 // Sign extend the LHS and perform an unsigned compare with the ADDW result. 5049 // Since the inputs are sign extended from i32, this is equivalent to 5050 // comparing the lower 32 bits. 5051 LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 5052 SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS, 5053 IsAdd ? ISD::SETULT : ISD::SETUGT); 5054 5055 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5056 Results.push_back(Overflow); 5057 return; 5058 } 5059 case ISD::UADDSAT: 5060 case ISD::USUBSAT: { 5061 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5062 "Unexpected custom legalisation"); 5063 if (Subtarget.hasStdExtZbb()) { 5064 // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using 5065 // sign extend allows overflow of the lower 32 bits to be detected on 5066 // the promoted size. 5067 SDValue LHS = 5068 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0)); 5069 SDValue RHS = 5070 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1)); 5071 SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS); 5072 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5073 return; 5074 } 5075 5076 // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom 5077 // promotion for UADDO/USUBO. 
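// Roughly: i32 (uaddsat x, y) becomes (uaddo x, y) plus a select of either
// the saturation value (all ones) or the sum, keyed on the overflow bit, and
// the UADDO legalization above then supplies the ADDW-based sequence.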
5078 Results.push_back(expandAddSubSat(N, DAG)); 5079 return; 5080 } 5081 case ISD::BITCAST: { 5082 EVT VT = N->getValueType(0); 5083 assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!"); 5084 SDValue Op0 = N->getOperand(0); 5085 EVT Op0VT = Op0.getValueType(); 5086 MVT XLenVT = Subtarget.getXLenVT(); 5087 if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) { 5088 SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0); 5089 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); 5090 } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() && 5091 Subtarget.hasStdExtF()) { 5092 SDValue FPConv = 5093 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); 5094 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); 5095 } else if (!VT.isVector() && Op0VT.isFixedLengthVector() && 5096 isTypeLegal(Op0VT)) { 5097 // Custom-legalize bitcasts from fixed-length vector types to illegal 5098 // scalar types in order to improve codegen. Bitcast the vector to a 5099 // one-element vector type whose element type is the same as the result 5100 // type, and extract the first element. 5101 LLVMContext &Context = *DAG.getContext(); 5102 SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0); 5103 Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, 5104 DAG.getConstant(0, DL, XLenVT))); 5105 } 5106 break; 5107 } 5108 case RISCVISD::GREV: 5109 case RISCVISD::GORC: { 5110 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5111 "Unexpected custom legalisation"); 5112 assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant"); 5113 // This is similar to customLegalizeToWOp, except that we pass the second 5114 // operand (a TargetConstant) straight through: it is already of type 5115 // XLenVT. 5116 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 5117 SDValue NewOp0 = 5118 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 5119 SDValue NewOp1 = 5120 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5121 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 5122 // ReplaceNodeResults requires we maintain the same type for the return 5123 // value. 5124 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 5125 break; 5126 } 5127 case RISCVISD::SHFL: { 5128 // There is no SHFLIW instruction, but we can just promote the operation. 5129 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5130 "Unexpected custom legalisation"); 5131 assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant"); 5132 SDValue NewOp0 = 5133 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 5134 SDValue NewOp1 = 5135 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5136 SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1); 5137 // ReplaceNodeResults requires we maintain the same type for the return 5138 // value. 
5139 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 5140 break; 5141 } 5142 case ISD::BSWAP: 5143 case ISD::BITREVERSE: { 5144 MVT VT = N->getSimpleValueType(0); 5145 MVT XLenVT = Subtarget.getXLenVT(); 5146 assert((VT == MVT::i8 || VT == MVT::i16 || 5147 (VT == MVT::i32 && Subtarget.is64Bit())) && 5148 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 5149 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0)); 5150 unsigned Imm = VT.getSizeInBits() - 1; 5151 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. 5152 if (N->getOpcode() == ISD::BSWAP) 5153 Imm &= ~0x7U; 5154 unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV; 5155 SDValue GREVI = 5156 DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT)); 5157 // ReplaceNodeResults requires we maintain the same type for the return 5158 // value. 5159 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI)); 5160 break; 5161 } 5162 case ISD::FSHL: 5163 case ISD::FSHR: { 5164 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5165 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 5166 SDValue NewOp0 = 5167 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 5168 SDValue NewOp1 = 5169 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5170 SDValue NewOp2 = 5171 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 5172 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 5173 // Mask the shift amount to 5 bits. 5174 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 5175 DAG.getConstant(0x1f, DL, MVT::i64)); 5176 unsigned Opc = 5177 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; 5178 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); 5179 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 5180 break; 5181 } 5182 case ISD::EXTRACT_VECTOR_ELT: { 5183 // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element 5184 // type is illegal (currently only vXi64 RV32). 5185 // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are 5186 // transferred to the destination register. We issue two of these from the 5187 // upper- and lower- halves of the SEW-bit vector element, slid down to the 5188 // first element. 5189 SDValue Vec = N->getOperand(0); 5190 SDValue Idx = N->getOperand(1); 5191 5192 // The vector type hasn't been legalized yet so we can't issue target 5193 // specific nodes if it needs legalization. 5194 // FIXME: We would manually legalize if it's important. 5195 if (!isTypeLegal(Vec.getValueType())) 5196 return; 5197 5198 MVT VecVT = Vec.getSimpleValueType(); 5199 5200 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && 5201 VecVT.getVectorElementType() == MVT::i64 && 5202 "Unexpected EXTRACT_VECTOR_ELT legalization"); 5203 5204 // If this is a fixed vector, we need to convert it to a scalable vector. 5205 MVT ContainerVT = VecVT; 5206 if (VecVT.isFixedLengthVector()) { 5207 ContainerVT = getContainerForFixedLengthVector(VecVT); 5208 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 5209 } 5210 5211 MVT XLenVT = Subtarget.getXLenVT(); 5212 5213 // Use a VL of 1 to avoid processing more elements than we need. 
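// A sketch of what this builds for element Idx of a vXi64 vector on RV32:
// slide the vector down by Idx (skipped when Idx is known to be zero), read
// the low 32 bits with VMV_X_S, shift the element right by 32 and read the
// high 32 bits the same way, then BUILD_PAIR the two halves into the i64
// result. The exact instructions are decided later by selection.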
5214 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 5215 SDValue VL = DAG.getConstant(1, DL, XLenVT); 5216 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 5217 5218 // Unless the index is known to be 0, we must slide the vector down to get 5219 // the desired element into index 0. 5220 if (!isNullConstant(Idx)) { 5221 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 5222 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 5223 } 5224 5225 // Extract the lower XLEN bits of the correct vector element. 5226 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 5227 5228 // To extract the upper XLEN bits of the vector element, shift the first 5229 // element right by 32 bits and re-extract the lower XLEN bits. 5230 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 5231 DAG.getConstant(32, DL, XLenVT), VL); 5232 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, 5233 ThirtyTwoV, Mask, VL); 5234 5235 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 5236 5237 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 5238 break; 5239 } 5240 case ISD::INTRINSIC_WO_CHAIN: { 5241 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 5242 switch (IntNo) { 5243 default: 5244 llvm_unreachable( 5245 "Don't know how to custom type legalize this intrinsic!"); 5246 case Intrinsic::riscv_orc_b: { 5247 // Lower to the GORCI encoding for orc.b with the operand extended. 5248 SDValue NewOp = 5249 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5250 // If Zbp is enabled, use GORCIW which will sign extend the result. 5251 unsigned Opc = 5252 Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC; 5253 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp, 5254 DAG.getConstant(7, DL, MVT::i64)); 5255 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5256 return; 5257 } 5258 case Intrinsic::riscv_grev: 5259 case Intrinsic::riscv_gorc: { 5260 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5261 "Unexpected custom legalisation"); 5262 SDValue NewOp1 = 5263 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5264 SDValue NewOp2 = 5265 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 5266 unsigned Opc = 5267 IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW; 5268 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); 5269 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5270 break; 5271 } 5272 case Intrinsic::riscv_shfl: 5273 case Intrinsic::riscv_unshfl: { 5274 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5275 "Unexpected custom legalisation"); 5276 SDValue NewOp1 = 5277 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5278 SDValue NewOp2 = 5279 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 5280 unsigned Opc = 5281 IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW; 5282 if (isa<ConstantSDNode>(N->getOperand(2))) { 5283 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 5284 DAG.getConstant(0xf, DL, MVT::i64)); 5285 Opc = 5286 IntNo == Intrinsic::riscv_shfl ? 
RISCVISD::SHFL : RISCVISD::UNSHFL; 5287 } 5288 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); 5289 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5290 break; 5291 } 5292 case Intrinsic::riscv_bcompress: 5293 case Intrinsic::riscv_bdecompress: { 5294 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 5295 "Unexpected custom legalisation"); 5296 SDValue NewOp1 = 5297 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 5298 SDValue NewOp2 = 5299 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 5300 unsigned Opc = IntNo == Intrinsic::riscv_bcompress 5301 ? RISCVISD::BCOMPRESSW 5302 : RISCVISD::BDECOMPRESSW; 5303 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2); 5304 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 5305 break; 5306 } 5307 case Intrinsic::riscv_vmv_x_s: { 5308 EVT VT = N->getValueType(0); 5309 MVT XLenVT = Subtarget.getXLenVT(); 5310 if (VT.bitsLT(XLenVT)) { 5311 // Simple case just extract using vmv.x.s and truncate. 5312 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, 5313 Subtarget.getXLenVT(), N->getOperand(1)); 5314 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); 5315 return; 5316 } 5317 5318 assert(VT == MVT::i64 && !Subtarget.is64Bit() && 5319 "Unexpected custom legalization"); 5320 5321 // We need to do the move in two steps. 5322 SDValue Vec = N->getOperand(1); 5323 MVT VecVT = Vec.getSimpleValueType(); 5324 5325 // First extract the lower XLEN bits of the element. 5326 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 5327 5328 // To extract the upper XLEN bits of the vector element, shift the first 5329 // element right by 32 bits and re-extract the lower XLEN bits. 5330 SDValue VL = DAG.getConstant(1, DL, XLenVT); 5331 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 5332 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 5333 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, 5334 DAG.getConstant(32, DL, XLenVT), VL); 5335 SDValue LShr32 = 5336 DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL); 5337 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 5338 5339 Results.push_back( 5340 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 5341 break; 5342 } 5343 } 5344 break; 5345 } 5346 case ISD::VECREDUCE_ADD: 5347 case ISD::VECREDUCE_AND: 5348 case ISD::VECREDUCE_OR: 5349 case ISD::VECREDUCE_XOR: 5350 case ISD::VECREDUCE_SMAX: 5351 case ISD::VECREDUCE_UMAX: 5352 case ISD::VECREDUCE_SMIN: 5353 case ISD::VECREDUCE_UMIN: 5354 if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG)) 5355 Results.push_back(V); 5356 break; 5357 case ISD::FLT_ROUNDS_: { 5358 SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other); 5359 SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0)); 5360 Results.push_back(Res.getValue(0)); 5361 Results.push_back(Res.getValue(1)); 5362 break; 5363 } 5364 } 5365 } 5366 5367 // A structure to hold one of the bit-manipulation patterns below. 
Together, a 5368 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: 5369 // (or (and (shl x, 1), 0xAAAAAAAA), 5370 // (and (srl x, 1), 0x55555555)) 5371 struct RISCVBitmanipPat { 5372 SDValue Op; 5373 unsigned ShAmt; 5374 bool IsSHL; 5375 5376 bool formsPairWith(const RISCVBitmanipPat &Other) const { 5377 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; 5378 } 5379 }; 5380 5381 // Matches patterns of the form 5382 // (and (shl x, C2), (C1 << C2)) 5383 // (and (srl x, C2), C1) 5384 // (shl (and x, C1), C2) 5385 // (srl (and x, (C1 << C2)), C2) 5386 // Where C2 is a power of 2 and C1 has at least that many leading zeroes. 5387 // The expected masks for each shift amount are specified in BitmanipMasks where 5388 // BitmanipMasks[log2(C2)] specifies the expected C1 value. 5389 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether 5390 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible 5391 // XLen is 64. 5392 static Optional<RISCVBitmanipPat> 5393 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { 5394 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && 5395 "Unexpected number of masks"); 5396 Optional<uint64_t> Mask; 5397 // Optionally consume a mask around the shift operation. 5398 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) { 5399 Mask = Op.getConstantOperandVal(1); 5400 Op = Op.getOperand(0); 5401 } 5402 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL) 5403 return None; 5404 bool IsSHL = Op.getOpcode() == ISD::SHL; 5405 5406 if (!isa<ConstantSDNode>(Op.getOperand(1))) 5407 return None; 5408 uint64_t ShAmt = Op.getConstantOperandVal(1); 5409 5410 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; 5411 if (ShAmt >= Width || !isPowerOf2_64(ShAmt)) 5412 return None; 5413 // If we don't have enough masks for 64 bit, then we must be trying to 5414 // match SHFL so we're only allowed to shift 1/4 of the width. 5415 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2)) 5416 return None; 5417 5418 SDValue Src = Op.getOperand(0); 5419 5420 // The expected mask is shifted left when the AND is found around SHL 5421 // patterns. 5422 // ((x >> 1) & 0x55555555) 5423 // ((x << 1) & 0xAAAAAAAA) 5424 bool SHLExpMask = IsSHL; 5425 5426 if (!Mask) { 5427 // Sometimes LLVM keeps the mask as an operand of the shift, typically when 5428 // the mask is all ones: consume that now. 5429 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) { 5430 Mask = Src.getConstantOperandVal(1); 5431 Src = Src.getOperand(0); 5432 // The expected mask is now in fact shifted left for SRL, so reverse the 5433 // decision. 5434 // ((x & 0xAAAAAAAA) >> 1) 5435 // ((x & 0x55555555) << 1) 5436 SHLExpMask = !SHLExpMask; 5437 } else { 5438 // Use a default shifted mask of all-ones if there's no AND, truncated 5439 // down to the expected width. This simplifies the logic later on. 5440 Mask = maskTrailingOnes<uint64_t>(Width); 5441 *Mask &= (IsSHL ? 
*Mask << ShAmt : *Mask >> ShAmt); 5442 } 5443 } 5444 5445 unsigned MaskIdx = Log2_32(ShAmt); 5446 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 5447 5448 if (SHLExpMask) 5449 ExpMask <<= ShAmt; 5450 5451 if (Mask != ExpMask) 5452 return None; 5453 5454 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL}; 5455 } 5456 5457 // Matches any of the following bit-manipulation patterns: 5458 // (and (shl x, 1), (0x55555555 << 1)) 5459 // (and (srl x, 1), 0x55555555) 5460 // (shl (and x, 0x55555555), 1) 5461 // (srl (and x, (0x55555555 << 1)), 1) 5462 // where the shift amount and mask may vary thus: 5463 // [1] = 0x55555555 / 0xAAAAAAAA 5464 // [2] = 0x33333333 / 0xCCCCCCCC 5465 // [4] = 0x0F0F0F0F / 0xF0F0F0F0 5466 // [8] = 0x00FF00FF / 0xFF00FF00 5467 // [16] = 0x0000FFFF / 0xFFFFFFFF 5468 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64) 5469 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) { 5470 // These are the unshifted masks which we use to match bit-manipulation 5471 // patterns. They may be shifted left in certain circumstances. 5472 static const uint64_t BitmanipMasks[] = { 5473 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 5474 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; 5475 5476 return matchRISCVBitmanipPat(Op, BitmanipMasks); 5477 } 5478 5479 // Match the following pattern as a GREVI(W) operation 5480 // (or (BITMANIP_SHL x), (BITMANIP_SRL x)) 5481 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG, 5482 const RISCVSubtarget &Subtarget) { 5483 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 5484 EVT VT = Op.getValueType(); 5485 5486 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 5487 auto LHS = matchGREVIPat(Op.getOperand(0)); 5488 auto RHS = matchGREVIPat(Op.getOperand(1)); 5489 if (LHS && RHS && LHS->formsPairWith(*RHS)) { 5490 SDLoc DL(Op); 5491 return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op, 5492 DAG.getConstant(LHS->ShAmt, DL, VT)); 5493 } 5494 } 5495 return SDValue(); 5496 } 5497 5498 // Matches any the following pattern as a GORCI(W) operation 5499 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2 5500 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2 5501 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x)) 5502 // Note that with the variant of 3., 5503 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x) 5504 // the inner pattern will first be matched as GREVI and then the outer 5505 // pattern will be matched to GORC via the first rule above. 5506 // 4. (or (rotl/rotr x, bitwidth/2), x) 5507 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG, 5508 const RISCVSubtarget &Subtarget) { 5509 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 5510 EVT VT = Op.getValueType(); 5511 5512 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 5513 SDLoc DL(Op); 5514 SDValue Op0 = Op.getOperand(0); 5515 SDValue Op1 = Op.getOperand(1); 5516 5517 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) { 5518 if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X && 5519 isa<ConstantSDNode>(Reverse.getOperand(1)) && 5520 isPowerOf2_32(Reverse.getConstantOperandVal(1))) 5521 return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1)); 5522 // We can also form GORCI from ROTL/ROTR by half the bitwidth. 
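// This works because, e.g. on RV32, rotating by 16 swaps the two halves, so
// (or (rotl x, 16), x) ORs every bit with its partner 16 positions away,
// which is exactly what GORC with shamt 16 computes.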
5523 if ((Reverse.getOpcode() == ISD::ROTL || 5524 Reverse.getOpcode() == ISD::ROTR) && 5525 Reverse.getOperand(0) == X && 5526 isa<ConstantSDNode>(Reverse.getOperand(1))) { 5527 uint64_t RotAmt = Reverse.getConstantOperandVal(1); 5528 if (RotAmt == (VT.getSizeInBits() / 2)) 5529 return DAG.getNode(RISCVISD::GORC, DL, VT, X, 5530 DAG.getConstant(RotAmt, DL, VT)); 5531 } 5532 return SDValue(); 5533 }; 5534 5535 // Check for either commutable permutation of (or (GREVI x, shamt), x) 5536 if (SDValue V = MatchOROfReverse(Op0, Op1)) 5537 return V; 5538 if (SDValue V = MatchOROfReverse(Op1, Op0)) 5539 return V; 5540 5541 // OR is commutable so canonicalize its OR operand to the left 5542 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR) 5543 std::swap(Op0, Op1); 5544 if (Op0.getOpcode() != ISD::OR) 5545 return SDValue(); 5546 SDValue OrOp0 = Op0.getOperand(0); 5547 SDValue OrOp1 = Op0.getOperand(1); 5548 auto LHS = matchGREVIPat(OrOp0); 5549 // OR is commutable so swap the operands and try again: x might have been 5550 // on the left 5551 if (!LHS) { 5552 std::swap(OrOp0, OrOp1); 5553 LHS = matchGREVIPat(OrOp0); 5554 } 5555 auto RHS = matchGREVIPat(Op1); 5556 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) { 5557 return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op, 5558 DAG.getConstant(LHS->ShAmt, DL, VT)); 5559 } 5560 } 5561 return SDValue(); 5562 } 5563 5564 // Matches any of the following bit-manipulation patterns: 5565 // (and (shl x, 1), (0x22222222 << 1)) 5566 // (and (srl x, 1), 0x22222222) 5567 // (shl (and x, 0x22222222), 1) 5568 // (srl (and x, (0x22222222 << 1)), 1) 5569 // where the shift amount and mask may vary thus: 5570 // [1] = 0x22222222 / 0x44444444 5571 // [2] = 0x0C0C0C0C / 0x3C3C3C3C 5572 // [4] = 0x00F000F0 / 0x0F000F00 5573 // [8] = 0x0000FF00 / 0x00FF0000 5574 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64) 5575 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) { 5576 // These are the unshifted masks which we use to match bit-manipulation 5577 // patterns. They may be shifted left in certain circumstances. 5578 static const uint64_t BitmanipMasks[] = { 5579 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL, 5580 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL}; 5581 5582 return matchRISCVBitmanipPat(Op, BitmanipMasks); 5583 } 5584 5585 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x) 5586 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG, 5587 const RISCVSubtarget &Subtarget) { 5588 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 5589 EVT VT = Op.getValueType(); 5590 5591 if (VT != MVT::i32 && VT != Subtarget.getXLenVT()) 5592 return SDValue(); 5593 5594 SDValue Op0 = Op.getOperand(0); 5595 SDValue Op1 = Op.getOperand(1); 5596 5597 // Or is commutable so canonicalize the second OR to the LHS. 5598 if (Op0.getOpcode() != ISD::OR) 5599 std::swap(Op0, Op1); 5600 if (Op0.getOpcode() != ISD::OR) 5601 return SDValue(); 5602 5603 // We found an inner OR, so our operands are the operands of the inner OR 5604 // and the other operand of the outer OR. 5605 SDValue A = Op0.getOperand(0); 5606 SDValue B = Op0.getOperand(1); 5607 SDValue C = Op1; 5608 5609 auto Match1 = matchSHFLPat(A); 5610 auto Match2 = matchSHFLPat(B); 5611 5612 // If neither matched, we failed. 5613 if (!Match1 && !Match2) 5614 return SDValue(); 5615 5616 // We had at least one match. if one failed, try the remaining C operand. 
5617 if (!Match1) {
5618 std::swap(A, C);
5619 Match1 = matchSHFLPat(A);
5620 if (!Match1)
5621 return SDValue();
5622 } else if (!Match2) {
5623 std::swap(B, C);
5624 Match2 = matchSHFLPat(B);
5625 if (!Match2)
5626 return SDValue();
5627 }
5628 assert(Match1 && Match2);
5629
5630 // Make sure our matches pair up.
5631 if (!Match1->formsPairWith(*Match2))
5632 return SDValue();
5633
5634 // All that remains is to make sure C is an AND with the same input that
5635 // masks out the bits that are being shuffled.
5636 if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5637 C.getOperand(0) != Match1->Op)
5638 return SDValue();
5639
5640 uint64_t Mask = C.getConstantOperandVal(1);
5641
5642 static const uint64_t BitmanipMasks[] = {
5643 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5644 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5645 };
5646
5647 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5648 unsigned MaskIdx = Log2_32(Match1->ShAmt);
5649 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5650
5651 if (Mask != ExpMask)
5652 return SDValue();
5653
5654 SDLoc DL(Op);
5655 return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5656 DAG.getConstant(Match1->ShAmt, DL, VT));
5657 }
5658
5659 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5660 // non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
5661 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage does
5662 // not undo itself, but it is redundant.
5663 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5664 SDValue Src = N->getOperand(0);
5665
5666 if (Src.getOpcode() != N->getOpcode())
5667 return SDValue();
5668
5669 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5670 !isa<ConstantSDNode>(Src.getOperand(1)))
5671 return SDValue();
5672
5673 unsigned ShAmt1 = N->getConstantOperandVal(1);
5674 unsigned ShAmt2 = Src.getConstantOperandVal(1);
5675 Src = Src.getOperand(0);
5676
5677 unsigned CombinedShAmt;
5678 if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5679 CombinedShAmt = ShAmt1 | ShAmt2;
5680 else
5681 CombinedShAmt = ShAmt1 ^ ShAmt2;
5682
5683 if (CombinedShAmt == 0)
5684 return Src;
5685
5686 SDLoc DL(N);
5687 return DAG.getNode(
5688 N->getOpcode(), DL, N->getValueType(0), Src,
5689 DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5690 }
5691
5692 // Combine a constant select operand into its use:
5693 //
5694 // (and (select_cc lhs, rhs, cc, -1, c), x)
5695 // -> (select_cc lhs, rhs, cc, x, (and x, c)) [AllOnes=1]
5696 // (or (select_cc lhs, rhs, cc, 0, c), x)
5697 // -> (select_cc lhs, rhs, cc, x, (or x, c)) [AllOnes=0]
5698 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5699 // -> (select_cc lhs, rhs, cc, x, (xor x, c)) [AllOnes=0]
5700 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5701 SelectionDAG &DAG, bool AllOnes) {
5702 EVT VT = N->getValueType(0);
5703
5704 if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5705 return SDValue();
5706
5707 auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5708 return AllOnes ?
isAllOnesConstant(N) : isNullConstant(N);
5709 };
5710
5711 bool SwapSelectOps;
5712 SDValue TrueVal = Slct.getOperand(3);
5713 SDValue FalseVal = Slct.getOperand(4);
5714 SDValue NonConstantVal;
5715 if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5716 SwapSelectOps = false;
5717 NonConstantVal = FalseVal;
5718 } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5719 SwapSelectOps = true;
5720 NonConstantVal = TrueVal;
5721 } else
5722 return SDValue();
5723
5724 // Slct is now known to be the desired identity constant when CC is true.
5725 TrueVal = OtherOp;
5726 FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5727 // Unless SwapSelectOps says CC should be false.
5728 if (SwapSelectOps)
5729 std::swap(TrueVal, FalseVal);
5730
5731 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5732 {Slct.getOperand(0), Slct.getOperand(1),
5733 Slct.getOperand(2), TrueVal, FalseVal});
5734 }
5735
5736 // Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5737 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5738 bool AllOnes) {
5739 SDValue N0 = N->getOperand(0);
5740 SDValue N1 = N->getOperand(1);
5741 if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5742 return Result;
5743 if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5744 return Result;
5745 return SDValue();
5746 }
5747
5748 static SDValue performANDCombine(SDNode *N,
5749 TargetLowering::DAGCombinerInfo &DCI,
5750 const RISCVSubtarget &Subtarget) {
5751 SelectionDAG &DAG = DCI.DAG;
5752
5753 // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5754 // (select lhs, rhs, cc, x, (and x, y))
5755 return combineSelectCCAndUseCommutative(N, DAG, true);
5756 }
5757
5758 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5759 const RISCVSubtarget &Subtarget) {
5760 SelectionDAG &DAG = DCI.DAG;
5761 if (Subtarget.hasStdExtZbp()) {
5762 if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5763 return GREV;
5764 if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5765 return GORC;
5766 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5767 return SHFL;
5768 }
5769
5770 // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5771 // (select lhs, rhs, cc, x, (or x, y))
5772 return combineSelectCCAndUseCommutative(N, DAG, false);
5773 }
5774
5775 static SDValue performXORCombine(SDNode *N,
5776 TargetLowering::DAGCombinerInfo &DCI,
5777 const RISCVSubtarget &Subtarget) {
5778 SelectionDAG &DAG = DCI.DAG;
5779
5780 // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5781 // (select lhs, rhs, cc, x, (xor x, y))
5782 return combineSelectCCAndUseCommutative(N, DAG, false);
5783 }
5784
5785 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5786 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5787 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5788 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5789 // ADDW/SUBW/MULW.
5790 static SDValue performANY_EXTENDCombine(SDNode *N,
5791 TargetLowering::DAGCombinerInfo &DCI,
5792 const RISCVSubtarget &Subtarget) {
5793 if (!Subtarget.is64Bit())
5794 return SDValue();
5795
5796 SelectionDAG &DAG = DCI.DAG;
5797
5798 SDValue Src = N->getOperand(0);
5799 EVT VT = N->getValueType(0);
5800 if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5801 return SDValue();
5802
5803 // The opcode must be one that can implicitly sign_extend.
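// (On RV64 an i32 ADD/SUB/MUL is selected as ADDW/SUBW/MULW, whose result is
// already sign-extended from bit 31, so the SIGN_EXTEND costs nothing extra.)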
5804 // FIXME: Additional opcodes. 5805 switch (Src.getOpcode()) { 5806 default: 5807 return SDValue(); 5808 case ISD::MUL: 5809 if (!Subtarget.hasStdExtM()) 5810 return SDValue(); 5811 LLVM_FALLTHROUGH; 5812 case ISD::ADD: 5813 case ISD::SUB: 5814 break; 5815 } 5816 5817 SmallVector<SDNode *, 4> SetCCs; 5818 for (SDNode::use_iterator UI = Src.getNode()->use_begin(), 5819 UE = Src.getNode()->use_end(); 5820 UI != UE; ++UI) { 5821 SDNode *User = *UI; 5822 if (User == N) 5823 continue; 5824 if (UI.getUse().getResNo() != Src.getResNo()) 5825 continue; 5826 // All i32 setccs are legalized by sign extending operands. 5827 if (User->getOpcode() == ISD::SETCC) { 5828 SetCCs.push_back(User); 5829 continue; 5830 } 5831 // We don't know if we can extend this user. 5832 break; 5833 } 5834 5835 // If we don't have any SetCCs, this isn't worthwhile. 5836 if (SetCCs.empty()) 5837 return SDValue(); 5838 5839 SDLoc DL(N); 5840 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src); 5841 DCI.CombineTo(N, SExt); 5842 5843 // Promote all the setccs. 5844 for (SDNode *SetCC : SetCCs) { 5845 SmallVector<SDValue, 4> Ops; 5846 5847 for (unsigned j = 0; j != 2; ++j) { 5848 SDValue SOp = SetCC->getOperand(j); 5849 if (SOp == Src) 5850 Ops.push_back(SExt); 5851 else 5852 Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp)); 5853 } 5854 5855 Ops.push_back(SetCC->getOperand(2)); 5856 DCI.CombineTo(SetCC, 5857 DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops)); 5858 } 5859 return SDValue(N, 0); 5860 } 5861 5862 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 5863 DAGCombinerInfo &DCI) const { 5864 SelectionDAG &DAG = DCI.DAG; 5865 5866 switch (N->getOpcode()) { 5867 default: 5868 break; 5869 case RISCVISD::SplitF64: { 5870 SDValue Op0 = N->getOperand(0); 5871 // If the input to SplitF64 is just BuildPairF64 then the operation is 5872 // redundant. Instead, use BuildPairF64's operands directly. 5873 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 5874 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 5875 5876 SDLoc DL(N); 5877 5878 // It's cheaper to materialise two 32-bit integers than to load a double 5879 // from the constant pool and transfer it to integer registers through the 5880 // stack. 5881 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { 5882 APInt V = C->getValueAPF().bitcastToAPInt(); 5883 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); 5884 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); 5885 return DCI.CombineTo(N, Lo, Hi); 5886 } 5887 5888 // This is a target-specific version of a DAGCombine performed in 5889 // DAGCombiner::visitBITCAST. 
It performs the equivalent of:
5890 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5891 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5892 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5893 !Op0.getNode()->hasOneUse())
5894 break;
5895 SDValue NewSplitF64 =
5896 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5897 Op0.getOperand(0));
5898 SDValue Lo = NewSplitF64.getValue(0);
5899 SDValue Hi = NewSplitF64.getValue(1);
5900 APInt SignBit = APInt::getSignMask(32);
5901 if (Op0.getOpcode() == ISD::FNEG) {
5902 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5903 DAG.getConstant(SignBit, DL, MVT::i32));
5904 return DCI.CombineTo(N, Lo, NewHi);
5905 }
5906 assert(Op0.getOpcode() == ISD::FABS);
5907 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5908 DAG.getConstant(~SignBit, DL, MVT::i32));
5909 return DCI.CombineTo(N, Lo, NewHi);
5910 }
5911 case RISCVISD::SLLW:
5912 case RISCVISD::SRAW:
5913 case RISCVISD::SRLW:
5914 case RISCVISD::ROLW:
5915 case RISCVISD::RORW: {
5916 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5917 SDValue LHS = N->getOperand(0);
5918 SDValue RHS = N->getOperand(1);
5919 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5920 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5921 if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5922 SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5923 if (N->getOpcode() != ISD::DELETED_NODE)
5924 DCI.AddToWorklist(N);
5925 return SDValue(N, 0);
5926 }
5927 break;
5928 }
5929 case RISCVISD::CLZW:
5930 case RISCVISD::CTZW: {
5931 // Only the lower 32 bits of the first operand are read.
5932 SDValue Op0 = N->getOperand(0);
5933 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5934 if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5935 if (N->getOpcode() != ISD::DELETED_NODE)
5936 DCI.AddToWorklist(N);
5937 return SDValue(N, 0);
5938 }
5939 break;
5940 }
5941 case RISCVISD::FSL:
5942 case RISCVISD::FSR: {
5943 // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5944 SDValue ShAmt = N->getOperand(2);
5945 unsigned BitWidth = ShAmt.getValueSizeInBits();
5946 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5947 APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5948 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5949 if (N->getOpcode() != ISD::DELETED_NODE)
5950 DCI.AddToWorklist(N);
5951 return SDValue(N, 0);
5952 }
5953 break;
5954 }
5955 case RISCVISD::FSLW:
5956 case RISCVISD::FSRW: {
5957 // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5958 // read.
5959 SDValue Op0 = N->getOperand(0);
5960 SDValue Op1 = N->getOperand(1);
5961 SDValue ShAmt = N->getOperand(2);
5962 APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5963 APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5964 if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5965 SimplifyDemandedBits(Op1, OpMask, DCI) ||
5966 SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5967 if (N->getOpcode() != ISD::DELETED_NODE)
5968 DCI.AddToWorklist(N);
5969 return SDValue(N, 0);
5970 }
5971 break;
5972 }
5973 case RISCVISD::GREV:
5974 case RISCVISD::GORC: {
5975 // Only the lower log2(Bitwidth) bits of the shift amount are read.
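// e.g. for an XLEN=64 GREV/GORC only bits [5:0] of the shift amount matter,
// so only a mask of BitWidth-1 (63) is demanded from it below.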
5976 SDValue ShAmt = N->getOperand(1);
5977 unsigned BitWidth = ShAmt.getValueSizeInBits();
5978 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5979 APInt ShAmtMask(BitWidth, BitWidth - 1);
5980 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5981 if (N->getOpcode() != ISD::DELETED_NODE)
5982 DCI.AddToWorklist(N);
5983 return SDValue(N, 0);
5984 }
5985
5986 return combineGREVI_GORCI(N, DCI.DAG);
5987 }
5988 case RISCVISD::GREVW:
5989 case RISCVISD::GORCW: {
5990 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5991 SDValue LHS = N->getOperand(0);
5992 SDValue RHS = N->getOperand(1);
5993 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5994 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5995 if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5996 SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5997 if (N->getOpcode() != ISD::DELETED_NODE)
5998 DCI.AddToWorklist(N);
5999 return SDValue(N, 0);
6000 }
6001
6002 return combineGREVI_GORCI(N, DCI.DAG);
6003 }
6004 case RISCVISD::SHFL:
6005 case RISCVISD::UNSHFL: {
6006 // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
6007 SDValue ShAmt = N->getOperand(1);
6008 unsigned BitWidth = ShAmt.getValueSizeInBits();
6009 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6010 APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
6011 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
6012 if (N->getOpcode() != ISD::DELETED_NODE)
6013 DCI.AddToWorklist(N);
6014 return SDValue(N, 0);
6015 }
6016
6017 break;
6018 }
6019 case RISCVISD::SHFLW:
6020 case RISCVISD::UNSHFLW: {
6021 // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6022 SDValue LHS = N->getOperand(0);
6023 SDValue RHS = N->getOperand(1);
6024 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6025 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
6026 if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
6027 SimplifyDemandedBits(RHS, RHSMask, DCI)) {
6028 if (N->getOpcode() != ISD::DELETED_NODE)
6029 DCI.AddToWorklist(N);
6030 return SDValue(N, 0);
6031 }
6032
6033 break;
6034 }
6035 case RISCVISD::BCOMPRESSW:
6036 case RISCVISD::BDECOMPRESSW: {
6037 // Only the lower 32 bits of LHS and RHS are read.
6038 SDValue LHS = N->getOperand(0);
6039 SDValue RHS = N->getOperand(1);
6040 APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6041 if (SimplifyDemandedBits(LHS, Mask, DCI) ||
6042 SimplifyDemandedBits(RHS, Mask, DCI)) {
6043 if (N->getOpcode() != ISD::DELETED_NODE)
6044 DCI.AddToWorklist(N);
6045 return SDValue(N, 0);
6046 }
6047
6048 break;
6049 }
6050 case RISCVISD::FMV_X_ANYEXTW_RV64: {
6051 SDLoc DL(N);
6052 SDValue Op0 = N->getOperand(0);
6053 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6054 // conversion is unnecessary and can be replaced with an ANY_EXTEND
6055 // of the FMV_W_X_RV64 operand.
6056 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
6057 assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
6058 "Unexpected value type!");
6059 return Op0.getOperand(0);
6060 }
6061
6062 // This is a target-specific version of a DAGCombine performed in
6063 // DAGCombiner::visitBITCAST.
It performs the equivalent of: 6064 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 6065 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 6066 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 6067 !Op0.getNode()->hasOneUse()) 6068 break; 6069 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 6070 Op0.getOperand(0)); 6071 APInt SignBit = APInt::getSignMask(32).sext(64); 6072 if (Op0.getOpcode() == ISD::FNEG) 6073 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 6074 DAG.getConstant(SignBit, DL, MVT::i64)); 6075 6076 assert(Op0.getOpcode() == ISD::FABS); 6077 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 6078 DAG.getConstant(~SignBit, DL, MVT::i64)); 6079 } 6080 case ISD::AND: 6081 return performANDCombine(N, DCI, Subtarget); 6082 case ISD::OR: 6083 return performORCombine(N, DCI, Subtarget); 6084 case ISD::XOR: 6085 return performXORCombine(N, DCI, Subtarget); 6086 case ISD::ANY_EXTEND: 6087 return performANY_EXTENDCombine(N, DCI, Subtarget); 6088 case ISD::ZERO_EXTEND: 6089 // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during 6090 // type legalization. This is safe because fp_to_uint produces poison if 6091 // it overflows. 6092 if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() && 6093 N->getOperand(0).getOpcode() == ISD::FP_TO_UINT && 6094 isTypeLegal(N->getOperand(0).getOperand(0).getValueType())) 6095 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64, 6096 N->getOperand(0).getOperand(0)); 6097 return SDValue(); 6098 case RISCVISD::SELECT_CC: { 6099 // Transform 6100 SDValue LHS = N->getOperand(0); 6101 SDValue RHS = N->getOperand(1); 6102 auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); 6103 if (!ISD::isIntEqualitySetCC(CCVal)) 6104 break; 6105 6106 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> 6107 // (select_cc X, Y, lt, trueV, falseV) 6108 // Sometimes the setcc is introduced after select_cc has been formed. 6109 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 6110 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 6111 // If we're looking for eq 0 instead of ne 0, we need to invert the 6112 // condition. 6113 bool Invert = CCVal == ISD::SETEQ; 6114 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 6115 if (Invert) 6116 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 6117 6118 SDLoc DL(N); 6119 RHS = LHS.getOperand(1); 6120 LHS = LHS.getOperand(0); 6121 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 6122 6123 SDValue TargetCC = 6124 DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT()); 6125 return DAG.getNode( 6126 RISCVISD::SELECT_CC, DL, N->getValueType(0), 6127 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 6128 } 6129 6130 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> 6131 // (select_cc X, Y, eq/ne, trueV, falseV) 6132 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 6133 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), 6134 {LHS.getOperand(0), LHS.getOperand(1), 6135 N->getOperand(2), N->getOperand(3), 6136 N->getOperand(4)}); 6137 // (select_cc X, 1, setne, trueV, falseV) -> 6138 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 6139 // This can occur when legalizing some floating point comparisons. 
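// For example, LHS here may be a SETCC that is already known to produce only
// 0 or 1, in which case (X != 1) is equivalent to (X == 0).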
6140 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6141 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6142 SDLoc DL(N);
6143 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6144 SDValue TargetCC =
6145 DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
6146 RHS = DAG.getConstant(0, DL, LHS.getValueType());
6147 return DAG.getNode(
6148 RISCVISD::SELECT_CC, DL, N->getValueType(0),
6149 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6150 }
6151
6152 break;
6153 }
6154 case RISCVISD::BR_CC: {
6155 SDValue LHS = N->getOperand(1);
6156 SDValue RHS = N->getOperand(2);
6157 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6158 if (!ISD::isIntEqualitySetCC(CCVal))
6159 break;
6160
6161 // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6162 // (br_cc X, Y, lt, dest)
6163 // Sometimes the setcc is introduced after br_cc has been formed.
6164 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6165 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6166 // If we're looking for eq 0 instead of ne 0, we need to invert the
6167 // condition.
6168 bool Invert = CCVal == ISD::SETEQ;
6169 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6170 if (Invert)
6171 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6172
6173 SDLoc DL(N);
6174 RHS = LHS.getOperand(1);
6175 LHS = LHS.getOperand(0);
6176 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6177
6178 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6179 N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6180 N->getOperand(4));
6181 }
6182
6183 // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
6184 // (br_cc X, Y, eq/ne, dest)
6185 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6186 return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6187 N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6188 N->getOperand(3), N->getOperand(4));
6189
6190 // (br_cc X, 1, setne, dest) ->
6191 // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6192 // This can occur when legalizing some floating point comparisons.
6193 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6194 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6195 SDLoc DL(N);
6196 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6197 SDValue TargetCC = DAG.getCondCode(CCVal);
6198 RHS = DAG.getConstant(0, DL, LHS.getValueType());
6199 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6200 N->getOperand(0), LHS, RHS, TargetCC,
6201 N->getOperand(4));
6202 }
6203 break;
6204 }
6205 case ISD::FCOPYSIGN: {
6206 EVT VT = N->getValueType(0);
6207 if (!VT.isVector())
6208 break;
6209 // There is a form of VFSGNJ which injects the negated sign of its second
6210 // operand. Try to bubble any FNEG up after the extend/round to produce
6211 // this optimized pattern. Avoid modifying cases where the FP_ROUND has
6212 // TRUNC=1.
6213 SDValue In2 = N->getOperand(1);
6214 // Avoid cases where the extend/round has multiple uses, as duplicating
6215 // those is typically more expensive than removing a fneg.
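// For example, (fcopysign X, (fpext (fneg Y))) is rewritten below to
// (fcopysign X, (fneg (fpext Y))) so the negated-sign form can be used.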
6216 if (!In2.hasOneUse()) 6217 break; 6218 if (In2.getOpcode() != ISD::FP_EXTEND && 6219 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0)) 6220 break; 6221 In2 = In2.getOperand(0); 6222 if (In2.getOpcode() != ISD::FNEG) 6223 break; 6224 SDLoc DL(N); 6225 SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT); 6226 return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0), 6227 DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound)); 6228 } 6229 case ISD::MGATHER: 6230 case ISD::MSCATTER: { 6231 if (!DCI.isBeforeLegalize()) 6232 break; 6233 MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N); 6234 SDValue Index = MGSN->getIndex(); 6235 EVT IndexVT = Index.getValueType(); 6236 MVT XLenVT = Subtarget.getXLenVT(); 6237 // RISCV indexed loads only support the "unsigned unscaled" addressing 6238 // mode, so anything else must be manually legalized. 6239 bool NeedsIdxLegalization = MGSN->isIndexScaled() || 6240 (MGSN->isIndexSigned() && 6241 IndexVT.getVectorElementType().bitsLT(XLenVT)); 6242 if (!NeedsIdxLegalization) 6243 break; 6244 6245 SDLoc DL(N); 6246 6247 // Any index legalization should first promote to XLenVT, so we don't lose 6248 // bits when scaling. This may create an illegal index type so we let 6249 // LLVM's legalization take care of the splitting. 6250 if (IndexVT.getVectorElementType().bitsLT(XLenVT)) { 6251 IndexVT = IndexVT.changeVectorElementType(XLenVT); 6252 Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND 6253 : ISD::ZERO_EXTEND, 6254 DL, IndexVT, Index); 6255 } 6256 6257 unsigned Scale = N->getConstantOperandVal(5); 6258 if (MGSN->isIndexScaled() && Scale != 1) { 6259 // Manually scale the indices by the element size. 6260 // TODO: Sanitize the scale operand here? 6261 assert(isPowerOf2_32(Scale) && "Expecting power-of-two types"); 6262 SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT); 6263 Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale); 6264 } 6265 6266 ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED; 6267 if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) { 6268 return DAG.getMaskedGather( 6269 N->getVTList(), MGSN->getMemoryVT(), DL, 6270 {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(), 6271 MGSN->getBasePtr(), Index, MGN->getScale()}, 6272 MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType()); 6273 } 6274 const auto *MSN = cast<MaskedScatterSDNode>(N); 6275 return DAG.getMaskedScatter( 6276 N->getVTList(), MGSN->getMemoryVT(), DL, 6277 {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(), 6278 Index, MGSN->getScale()}, 6279 MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore()); 6280 } 6281 case RISCVISD::SRA_VL: 6282 case RISCVISD::SRL_VL: 6283 case RISCVISD::SHL_VL: { 6284 SDValue ShAmt = N->getOperand(1); 6285 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) { 6286 // We don't need the upper 32 bits of a 64-bit element for a shift amount. 6287 SDLoc DL(N); 6288 SDValue VL = N->getOperand(3); 6289 EVT VT = N->getValueType(0); 6290 ShAmt = 6291 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL); 6292 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt, 6293 N->getOperand(2), N->getOperand(3)); 6294 } 6295 break; 6296 } 6297 case ISD::SRA: 6298 case ISD::SRL: 6299 case ISD::SHL: { 6300 SDValue ShAmt = N->getOperand(1); 6301 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) { 6302 // We don't need the upper 32 bits of a 64-bit element for a shift amount. 
6303 SDLoc DL(N); 6304 EVT VT = N->getValueType(0); 6305 ShAmt = 6306 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0)); 6307 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt); 6308 } 6309 break; 6310 } 6311 case RISCVISD::MUL_VL: { 6312 // Try to form VWMUL or VWMULU. 6313 // FIXME: Look for splat of extended scalar as well. 6314 // FIXME: Support VWMULSU. 6315 SDValue Op0 = N->getOperand(0); 6316 SDValue Op1 = N->getOperand(1); 6317 bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL; 6318 bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL; 6319 if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode()) 6320 return SDValue(); 6321 6322 // Make sure the extends have a single use. 6323 if (!Op0.hasOneUse() || !Op1.hasOneUse()) 6324 return SDValue(); 6325 6326 SDValue Mask = N->getOperand(2); 6327 SDValue VL = N->getOperand(3); 6328 if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask || 6329 Op0.getOperand(2) != VL || Op1.getOperand(2) != VL) 6330 return SDValue(); 6331 6332 Op0 = Op0.getOperand(0); 6333 Op1 = Op1.getOperand(0); 6334 6335 MVT VT = N->getSimpleValueType(0); 6336 MVT NarrowVT = 6337 MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2), 6338 VT.getVectorElementCount()); 6339 6340 SDLoc DL(N); 6341 6342 // Re-introduce narrower extends if needed. 6343 unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL; 6344 if (Op0.getValueType() != NarrowVT) 6345 Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL); 6346 if (Op1.getValueType() != NarrowVT) 6347 Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL); 6348 6349 unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL; 6350 return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL); 6351 } 6352 } 6353 6354 return SDValue(); 6355 } 6356 6357 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 6358 const SDNode *N, CombineLevel Level) const { 6359 // The following folds are only desirable if `(OP _, c1 << c2)` can be 6360 // materialised in fewer instructions than `(OP _, c1)`: 6361 // 6362 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 6363 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 6364 SDValue N0 = N->getOperand(0); 6365 EVT Ty = N0.getValueType(); 6366 if (Ty.isScalarInteger() && 6367 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 6368 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 6369 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 6370 if (C1 && C2) { 6371 const APInt &C1Int = C1->getAPIntValue(); 6372 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 6373 6374 // We can materialise `c1 << c2` into an add immediate, so it's "free", 6375 // and the combine should happen, to potentially allow further combines 6376 // later. 6377 if (ShiftedC1Int.getMinSignedBits() <= 64 && 6378 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 6379 return true; 6380 6381 // We can materialise `c1` in an add immediate, so it's "free", and the 6382 // combine should be prevented. 6383 if (C1Int.getMinSignedBits() <= 64 && 6384 isLegalAddImmediate(C1Int.getSExtValue())) 6385 return false; 6386 6387 // Neither constant will fit into an immediate, so find materialisation 6388 // costs. 
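// For example, a c1 such as 0x7ffff001 can typically be built with LUI+ADDIW,
// while c1 << 12 would usually need an extra SLLI, making the combine
// unprofitable.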
6389 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 6390 Subtarget.getFeatureBits(), 6391 /*CompressionCost*/true); 6392 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 6393 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(), 6394 /*CompressionCost*/true); 6395 6396 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 6397 // combine should be prevented. 6398 if (C1Cost < ShiftedC1Cost) 6399 return false; 6400 } 6401 } 6402 return true; 6403 } 6404 6405 bool RISCVTargetLowering::targetShrinkDemandedConstant( 6406 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 6407 TargetLoweringOpt &TLO) const { 6408 // Delay this optimization as late as possible. 6409 if (!TLO.LegalOps) 6410 return false; 6411 6412 EVT VT = Op.getValueType(); 6413 if (VT.isVector()) 6414 return false; 6415 6416 // Only handle AND for now. 6417 if (Op.getOpcode() != ISD::AND) 6418 return false; 6419 6420 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 6421 if (!C) 6422 return false; 6423 6424 const APInt &Mask = C->getAPIntValue(); 6425 6426 // Clear all non-demanded bits initially. 6427 APInt ShrunkMask = Mask & DemandedBits; 6428 6429 // Try to make a smaller immediate by setting undemanded bits. 6430 6431 APInt ExpandedMask = Mask | ~DemandedBits; 6432 6433 auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool { 6434 return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask); 6435 }; 6436 auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool { 6437 if (NewMask == Mask) 6438 return true; 6439 SDLoc DL(Op); 6440 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); 6441 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); 6442 return TLO.CombineTo(Op, NewOp); 6443 }; 6444 6445 // If the shrunk mask fits in sign extended 12 bits, let the target 6446 // independent code apply it. 6447 if (ShrunkMask.isSignedIntN(12)) 6448 return false; 6449 6450 // Preserve (and X, 0xffff) when zext.h is supported. 6451 if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) { 6452 APInt NewMask = APInt(Mask.getBitWidth(), 0xffff); 6453 if (IsLegalMask(NewMask)) 6454 return UseMask(NewMask); 6455 } 6456 6457 // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern. 6458 if (VT == MVT::i64) { 6459 APInt NewMask = APInt(64, 0xffffffff); 6460 if (IsLegalMask(NewMask)) 6461 return UseMask(NewMask); 6462 } 6463 6464 // For the remaining optimizations, we need to be able to make a negative 6465 // number through a combination of mask and undemanded bits. 6466 if (!ExpandedMask.isNegative()) 6467 return false; 6468 6469 // What is the fewest number of bits we need to represent the negative number. 6470 unsigned MinSignedBits = ExpandedMask.getMinSignedBits(); 6471 6472 // Try to make a 12 bit negative immediate. If that fails try to make a 32 6473 // bit negative immediate unless the shrunk immediate already fits in 32 bits. 6474 APInt NewMask = ShrunkMask; 6475 if (MinSignedBits <= 12) 6476 NewMask.setBitsFrom(11); 6477 else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32)) 6478 NewMask.setBitsFrom(31); 6479 else 6480 return false; 6481 6482 // Sanity check that our new mask is a subset of the demanded mask. 
6483 assert(IsLegalMask(NewMask)); 6484 return UseMask(NewMask); 6485 } 6486 6487 static void computeGREV(APInt &Src, unsigned ShAmt) { 6488 ShAmt &= Src.getBitWidth() - 1; 6489 uint64_t x = Src.getZExtValue(); 6490 if (ShAmt & 1) 6491 x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); 6492 if (ShAmt & 2) 6493 x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); 6494 if (ShAmt & 4) 6495 x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); 6496 if (ShAmt & 8) 6497 x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); 6498 if (ShAmt & 16) 6499 x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); 6500 if (ShAmt & 32) 6501 x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); 6502 Src = x; 6503 } 6504 6505 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 6506 KnownBits &Known, 6507 const APInt &DemandedElts, 6508 const SelectionDAG &DAG, 6509 unsigned Depth) const { 6510 unsigned BitWidth = Known.getBitWidth(); 6511 unsigned Opc = Op.getOpcode(); 6512 assert((Opc >= ISD::BUILTIN_OP_END || 6513 Opc == ISD::INTRINSIC_WO_CHAIN || 6514 Opc == ISD::INTRINSIC_W_CHAIN || 6515 Opc == ISD::INTRINSIC_VOID) && 6516 "Should use MaskedValueIsZero if you don't know whether Op" 6517 " is a target node!"); 6518 6519 Known.resetAll(); 6520 switch (Opc) { 6521 default: break; 6522 case RISCVISD::SELECT_CC: { 6523 Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1); 6524 // If we don't know any bits, early out. 6525 if (Known.isUnknown()) 6526 break; 6527 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1); 6528 6529 // Only known if known in both the LHS and RHS. 6530 Known = KnownBits::commonBits(Known, Known2); 6531 break; 6532 } 6533 case RISCVISD::REMUW: { 6534 KnownBits Known2; 6535 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 6536 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 6537 // We only care about the lower 32 bits. 6538 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); 6539 // Restore the original width by sign extending. 6540 Known = Known.sext(BitWidth); 6541 break; 6542 } 6543 case RISCVISD::DIVUW: { 6544 KnownBits Known2; 6545 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 6546 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 6547 // We only care about the lower 32 bits. 6548 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); 6549 // Restore the original width by sign extending. 
6550 Known = Known.sext(BitWidth); 6551 break; 6552 } 6553 case RISCVISD::CTZW: { 6554 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 6555 unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros(); 6556 unsigned LowBits = Log2_32(PossibleTZ) + 1; 6557 Known.Zero.setBitsFrom(LowBits); 6558 break; 6559 } 6560 case RISCVISD::CLZW: { 6561 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 6562 unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros(); 6563 unsigned LowBits = Log2_32(PossibleLZ) + 1; 6564 Known.Zero.setBitsFrom(LowBits); 6565 break; 6566 } 6567 case RISCVISD::GREV: 6568 case RISCVISD::GREVW: { 6569 if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 6570 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 6571 if (Opc == RISCVISD::GREVW) 6572 Known = Known.trunc(32); 6573 unsigned ShAmt = C->getZExtValue(); 6574 computeGREV(Known.Zero, ShAmt); 6575 computeGREV(Known.One, ShAmt); 6576 if (Opc == RISCVISD::GREVW) 6577 Known = Known.sext(BitWidth); 6578 } 6579 break; 6580 } 6581 case RISCVISD::READ_VLENB: 6582 // We assume VLENB is at least 16 bytes. 6583 Known.Zero.setLowBits(4); 6584 // We assume VLENB is no more than 65536 / 8 bytes. 6585 Known.Zero.setBitsFrom(14); 6586 break; 6587 case ISD::INTRINSIC_W_CHAIN: { 6588 unsigned IntNo = Op.getConstantOperandVal(1); 6589 switch (IntNo) { 6590 default: 6591 // We can't do anything for most intrinsics. 6592 break; 6593 case Intrinsic::riscv_vsetvli: 6594 case Intrinsic::riscv_vsetvlimax: 6595 // Assume that VL output is positive and would fit in an int32_t. 6596 // TODO: VLEN might be capped at 16 bits in a future V spec update. 6597 if (BitWidth >= 32) 6598 Known.Zero.setBitsFrom(31); 6599 break; 6600 } 6601 break; 6602 } 6603 } 6604 } 6605 6606 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 6607 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 6608 unsigned Depth) const { 6609 switch (Op.getOpcode()) { 6610 default: 6611 break; 6612 case RISCVISD::SLLW: 6613 case RISCVISD::SRAW: 6614 case RISCVISD::SRLW: 6615 case RISCVISD::DIVW: 6616 case RISCVISD::DIVUW: 6617 case RISCVISD::REMUW: 6618 case RISCVISD::ROLW: 6619 case RISCVISD::RORW: 6620 case RISCVISD::GREVW: 6621 case RISCVISD::GORCW: 6622 case RISCVISD::FSLW: 6623 case RISCVISD::FSRW: 6624 case RISCVISD::SHFLW: 6625 case RISCVISD::UNSHFLW: 6626 case RISCVISD::BCOMPRESSW: 6627 case RISCVISD::BDECOMPRESSW: 6628 case RISCVISD::FCVT_W_RV64: 6629 case RISCVISD::FCVT_WU_RV64: 6630 // TODO: As the result is sign-extended, this is conservatively correct. A 6631 // more precise answer could be calculated for SRAW depending on known 6632 // bits in the shift amount. 6633 return 33; 6634 case RISCVISD::SHFL: 6635 case RISCVISD::UNSHFL: { 6636 // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word 6637 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but 6638 // will stay within the upper 32 bits. If there were more than 32 sign bits 6639 // before there will be at least 33 sign bits after. 
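// For example, (shfli x, 0xC) only shuffles bits within each 32-bit half, so
// if x has 40 sign bits the result is still guaranteed at least 33.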
6640 if (Op.getValueType() == MVT::i64 && 6641 isa<ConstantSDNode>(Op.getOperand(1)) && 6642 (Op.getConstantOperandVal(1) & 0x10) == 0) { 6643 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 6644 if (Tmp > 32) 6645 return 33; 6646 } 6647 break; 6648 } 6649 case RISCVISD::VMV_X_S: 6650 // The number of sign bits of the scalar result is computed by obtaining the 6651 // element type of the input vector operand, subtracting its width from the 6652 // XLEN, and then adding one (sign bit within the element type). If the 6653 // element type is wider than XLen, the least-significant XLEN bits are 6654 // taken. 6655 if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) 6656 return 1; 6657 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; 6658 } 6659 6660 return 1; 6661 } 6662 6663 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 6664 MachineBasicBlock *BB) { 6665 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 6666 6667 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 6668 // Should the count have wrapped while it was being read, we need to try 6669 // again. 6670 // ... 6671 // read: 6672 // rdcycleh x3 # load high word of cycle 6673 // rdcycle x2 # load low word of cycle 6674 // rdcycleh x4 # load high word of cycle 6675 // bne x3, x4, read # check if high word reads match, otherwise try again 6676 // ... 6677 6678 MachineFunction &MF = *BB->getParent(); 6679 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6680 MachineFunction::iterator It = ++BB->getIterator(); 6681 6682 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 6683 MF.insert(It, LoopMBB); 6684 6685 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 6686 MF.insert(It, DoneMBB); 6687 6688 // Transfer the remainder of BB and its successor edges to DoneMBB. 
6689 DoneMBB->splice(DoneMBB->begin(), BB, 6690 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 6691 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 6692 6693 BB->addSuccessor(LoopMBB); 6694 6695 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 6696 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 6697 Register LoReg = MI.getOperand(0).getReg(); 6698 Register HiReg = MI.getOperand(1).getReg(); 6699 DebugLoc DL = MI.getDebugLoc(); 6700 6701 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 6702 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 6703 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 6704 .addReg(RISCV::X0); 6705 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 6706 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 6707 .addReg(RISCV::X0); 6708 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 6709 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 6710 .addReg(RISCV::X0); 6711 6712 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 6713 .addReg(HiReg) 6714 .addReg(ReadAgainReg) 6715 .addMBB(LoopMBB); 6716 6717 LoopMBB->addSuccessor(LoopMBB); 6718 LoopMBB->addSuccessor(DoneMBB); 6719 6720 MI.eraseFromParent(); 6721 6722 return DoneMBB; 6723 } 6724 6725 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 6726 MachineBasicBlock *BB) { 6727 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 6728 6729 MachineFunction &MF = *BB->getParent(); 6730 DebugLoc DL = MI.getDebugLoc(); 6731 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 6732 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 6733 Register LoReg = MI.getOperand(0).getReg(); 6734 Register HiReg = MI.getOperand(1).getReg(); 6735 Register SrcReg = MI.getOperand(2).getReg(); 6736 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 6737 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 6738 6739 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 6740 RI); 6741 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 6742 MachineMemOperand *MMOLo = 6743 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 6744 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 6745 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 6746 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 6747 .addFrameIndex(FI) 6748 .addImm(0) 6749 .addMemOperand(MMOLo); 6750 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 6751 .addFrameIndex(FI) 6752 .addImm(4) 6753 .addMemOperand(MMOHi); 6754 MI.eraseFromParent(); // The pseudo instruction is gone now. 
6755 return BB; 6756 } 6757 6758 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 6759 MachineBasicBlock *BB) { 6760 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 6761 "Unexpected instruction"); 6762 6763 MachineFunction &MF = *BB->getParent(); 6764 DebugLoc DL = MI.getDebugLoc(); 6765 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 6766 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 6767 Register DstReg = MI.getOperand(0).getReg(); 6768 Register LoReg = MI.getOperand(1).getReg(); 6769 Register HiReg = MI.getOperand(2).getReg(); 6770 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 6771 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 6772 6773 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 6774 MachineMemOperand *MMOLo = 6775 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 6776 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 6777 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 6778 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 6779 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 6780 .addFrameIndex(FI) 6781 .addImm(0) 6782 .addMemOperand(MMOLo); 6783 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 6784 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 6785 .addFrameIndex(FI) 6786 .addImm(4) 6787 .addMemOperand(MMOHi); 6788 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 6789 MI.eraseFromParent(); // The pseudo instruction is gone now. 6790 return BB; 6791 } 6792 6793 static bool isSelectPseudo(MachineInstr &MI) { 6794 switch (MI.getOpcode()) { 6795 default: 6796 return false; 6797 case RISCV::Select_GPR_Using_CC_GPR: 6798 case RISCV::Select_FPR16_Using_CC_GPR: 6799 case RISCV::Select_FPR32_Using_CC_GPR: 6800 case RISCV::Select_FPR64_Using_CC_GPR: 6801 return true; 6802 } 6803 } 6804 6805 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 6806 MachineBasicBlock *BB) { 6807 // To "insert" Select_* instructions, we actually have to insert the triangle 6808 // control-flow pattern. The incoming instructions know the destination vreg 6809 // to set, the condition code register to branch on, the true/false values to 6810 // select between, and the condcode to use to select the appropriate branch. 6811 // 6812 // We produce the following control flow: 6813 // HeadMBB 6814 // | \ 6815 // | IfFalseMBB 6816 // | / 6817 // TailMBB 6818 // 6819 // When we find a sequence of selects we attempt to optimize their emission 6820 // by sharing the control flow. Currently we only handle cases where we have 6821 // multiple selects with the exact same condition (same LHS, RHS and CC). 6822 // The selects may be interleaved with other instructions if the other 6823 // instructions meet some requirements we deem safe: 6824 // - They are debug instructions. Otherwise, 6825 // - They do not have side-effects, do not access memory and their inputs do 6826 // not depend on the results of the select pseudo-instructions. 6827 // The TrueV/FalseV operands of the selects cannot depend on the result of 6828 // previous selects in the sequence. 6829 // These conditions could be further relaxed. See the X86 target for a 6830 // related approach and more information. 
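// For example, two consecutive Select_GPR_Using_CC_GPR pseudos that compare
// the same LHS/RHS with the same condition are lowered to a single branch
// plus one PHI per select in TailMBB.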
6831 Register LHS = MI.getOperand(1).getReg(); 6832 Register RHS = MI.getOperand(2).getReg(); 6833 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 6834 6835 SmallVector<MachineInstr *, 4> SelectDebugValues; 6836 SmallSet<Register, 4> SelectDests; 6837 SelectDests.insert(MI.getOperand(0).getReg()); 6838 6839 MachineInstr *LastSelectPseudo = &MI; 6840 6841 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 6842 SequenceMBBI != E; ++SequenceMBBI) { 6843 if (SequenceMBBI->isDebugInstr()) 6844 continue; 6845 else if (isSelectPseudo(*SequenceMBBI)) { 6846 if (SequenceMBBI->getOperand(1).getReg() != LHS || 6847 SequenceMBBI->getOperand(2).getReg() != RHS || 6848 SequenceMBBI->getOperand(3).getImm() != CC || 6849 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 6850 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 6851 break; 6852 LastSelectPseudo = &*SequenceMBBI; 6853 SequenceMBBI->collectDebugValues(SelectDebugValues); 6854 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 6855 } else { 6856 if (SequenceMBBI->hasUnmodeledSideEffects() || 6857 SequenceMBBI->mayLoadOrStore()) 6858 break; 6859 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 6860 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 6861 })) 6862 break; 6863 } 6864 } 6865 6866 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 6867 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6868 DebugLoc DL = MI.getDebugLoc(); 6869 MachineFunction::iterator I = ++BB->getIterator(); 6870 6871 MachineBasicBlock *HeadMBB = BB; 6872 MachineFunction *F = BB->getParent(); 6873 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 6874 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 6875 6876 F->insert(I, IfFalseMBB); 6877 F->insert(I, TailMBB); 6878 6879 // Transfer debug instructions associated with the selects to TailMBB. 6880 for (MachineInstr *DebugInstr : SelectDebugValues) { 6881 TailMBB->push_back(DebugInstr->removeFromParent()); 6882 } 6883 6884 // Move all instructions after the sequence to TailMBB. 6885 TailMBB->splice(TailMBB->end(), HeadMBB, 6886 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 6887 // Update machine-CFG edges by transferring all successors of the current 6888 // block to the new block which will contain the Phi nodes for the selects. 6889 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 6890 // Set the successors for HeadMBB. 6891 HeadMBB->addSuccessor(IfFalseMBB); 6892 HeadMBB->addSuccessor(TailMBB); 6893 6894 // Insert appropriate branch. 6895 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 6896 6897 BuildMI(HeadMBB, DL, TII.get(Opcode)) 6898 .addReg(LHS) 6899 .addReg(RHS) 6900 .addMBB(TailMBB); 6901 6902 // IfFalseMBB just falls through to TailMBB. 6903 IfFalseMBB->addSuccessor(TailMBB); 6904 6905 // Create PHIs for all of the select pseudo-instructions. 
6906 auto SelectMBBI = MI.getIterator(); 6907 auto SelectEnd = std::next(LastSelectPseudo->getIterator()); 6908 auto InsertionPoint = TailMBB->begin(); 6909 while (SelectMBBI != SelectEnd) { 6910 auto Next = std::next(SelectMBBI); 6911 if (isSelectPseudo(*SelectMBBI)) { 6912 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 6913 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), 6914 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) 6915 .addReg(SelectMBBI->getOperand(4).getReg()) 6916 .addMBB(HeadMBB) 6917 .addReg(SelectMBBI->getOperand(5).getReg()) 6918 .addMBB(IfFalseMBB); 6919 SelectMBBI->eraseFromParent(); 6920 } 6921 SelectMBBI = Next; 6922 } 6923 6924 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); 6925 return TailMBB; 6926 } 6927 6928 MachineBasicBlock * 6929 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, 6930 MachineBasicBlock *BB) const { 6931 switch (MI.getOpcode()) { 6932 default: 6933 llvm_unreachable("Unexpected instr type to insert"); 6934 case RISCV::ReadCycleWide: 6935 assert(!Subtarget.is64Bit() && 6936 "ReadCycleWrite is only to be used on riscv32"); 6937 return emitReadCycleWidePseudo(MI, BB); 6938 case RISCV::Select_GPR_Using_CC_GPR: 6939 case RISCV::Select_FPR16_Using_CC_GPR: 6940 case RISCV::Select_FPR32_Using_CC_GPR: 6941 case RISCV::Select_FPR64_Using_CC_GPR: 6942 return emitSelectPseudo(MI, BB); 6943 case RISCV::BuildPairF64Pseudo: 6944 return emitBuildPairF64Pseudo(MI, BB); 6945 case RISCV::SplitF64Pseudo: 6946 return emitSplitF64Pseudo(MI, BB); 6947 } 6948 } 6949 6950 // Calling Convention Implementation. 6951 // The expectations for frontend ABI lowering vary from target to target. 6952 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI 6953 // details, but this is a longer term goal. For now, we simply try to keep the 6954 // role of the frontend as simple and well-defined as possible. The rules can 6955 // be summarised as: 6956 // * Never split up large scalar arguments. We handle them here. 6957 // * If a hardfloat calling convention is being used, and the struct may be 6958 // passed in a pair of registers (fp+fp, int+fp), and both registers are 6959 // available, then pass as two separate arguments. If either the GPRs or FPRs 6960 // are exhausted, then pass according to the rule below. 6961 // * If a struct could never be passed in registers or directly in a stack 6962 // slot (as it is larger than 2*XLEN and the floating point rules don't 6963 // apply), then pass it using a pointer with the byval attribute. 6964 // * If a struct is less than 2*XLEN, then coerce to either a two-element 6965 // word-sized array or a 2*XLEN scalar (depending on alignment). 6966 // * The frontend can determine whether a struct is returned by reference or 6967 // not based on its size and fields. If it will be returned by reference, the 6968 // frontend must modify the prototype so a pointer with the sret annotation is 6969 // passed as the first argument. This is not necessary for large scalar 6970 // returns. 6971 // * Struct return values and varargs should be coerced to structs containing 6972 // register-size fields in the same situations they would be for fixed 6973 // arguments. 
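// For example, under the hard-float ilp32f ABI a 'struct { float f; int i; }'
// argument can be passed as two separate arguments (f in an FPR, i in a GPR)
// while both kinds of registers remain available.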
6974 6975 static const MCPhysReg ArgGPRs[] = { 6976 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 6977 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 6978 }; 6979 static const MCPhysReg ArgFPR16s[] = { 6980 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 6981 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 6982 }; 6983 static const MCPhysReg ArgFPR32s[] = { 6984 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 6985 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 6986 }; 6987 static const MCPhysReg ArgFPR64s[] = { 6988 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 6989 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 6990 }; 6991 // This is an interim calling convention and it may be changed in the future. 6992 static const MCPhysReg ArgVRs[] = { 6993 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, 6994 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, 6995 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; 6996 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, 6997 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, 6998 RISCV::V20M2, RISCV::V22M2}; 6999 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, 7000 RISCV::V20M4}; 7001 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; 7002 7003 // Pass a 2*XLEN argument that has been split into two XLEN values through 7004 // registers or the stack as necessary. 7005 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 7006 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 7007 MVT ValVT2, MVT LocVT2, 7008 ISD::ArgFlagsTy ArgFlags2) { 7009 unsigned XLenInBytes = XLen / 8; 7010 if (Register Reg = State.AllocateReg(ArgGPRs)) { 7011 // At least one half can be passed via register. 7012 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 7013 VA1.getLocVT(), CCValAssign::Full)); 7014 } else { 7015 // Both halves must be passed on the stack, with proper alignment. 7016 Align StackAlign = 7017 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 7018 State.addLoc( 7019 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 7020 State.AllocateStack(XLenInBytes, StackAlign), 7021 VA1.getLocVT(), CCValAssign::Full)); 7022 State.addLoc(CCValAssign::getMem( 7023 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 7024 LocVT2, CCValAssign::Full)); 7025 return false; 7026 } 7027 7028 if (Register Reg = State.AllocateReg(ArgGPRs)) { 7029 // The second half can also be passed via register. 7030 State.addLoc( 7031 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 7032 } else { 7033 // The second half is passed via the stack, without additional alignment. 7034 State.addLoc(CCValAssign::getMem( 7035 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 7036 LocVT2, CCValAssign::Full)); 7037 } 7038 7039 return false; 7040 } 7041 7042 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, 7043 Optional<unsigned> FirstMaskArgument, 7044 CCState &State, const RISCVTargetLowering &TLI) { 7045 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); 7046 if (RC == &RISCV::VRRegClass) { 7047 // Assign the first mask argument to V0. 7048 // This is an interim calling convention and it may be changed in the 7049 // future. 
7050 if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue()) 7051 return State.AllocateReg(RISCV::V0); 7052 return State.AllocateReg(ArgVRs); 7053 } 7054 if (RC == &RISCV::VRM2RegClass) 7055 return State.AllocateReg(ArgVRM2s); 7056 if (RC == &RISCV::VRM4RegClass) 7057 return State.AllocateReg(ArgVRM4s); 7058 if (RC == &RISCV::VRM8RegClass) 7059 return State.AllocateReg(ArgVRM8s); 7060 llvm_unreachable("Unhandled register class for ValueType"); 7061 } 7062 7063 // Implements the RISC-V calling convention. Returns true upon failure. 7064 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, 7065 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, 7066 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, 7067 bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, 7068 Optional<unsigned> FirstMaskArgument) { 7069 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); 7070 assert(XLen == 32 || XLen == 64); 7071 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; 7072 7073 // Any return value split in to more than two values can't be returned 7074 // directly. Vectors are returned via the available vector registers. 7075 if (!LocVT.isVector() && IsRet && ValNo > 1) 7076 return true; 7077 7078 // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a 7079 // variadic argument, or if no F16/F32 argument registers are available. 7080 bool UseGPRForF16_F32 = true; 7081 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a 7082 // variadic argument, or if no F64 argument registers are available. 7083 bool UseGPRForF64 = true; 7084 7085 switch (ABI) { 7086 default: 7087 llvm_unreachable("Unexpected ABI"); 7088 case RISCVABI::ABI_ILP32: 7089 case RISCVABI::ABI_LP64: 7090 break; 7091 case RISCVABI::ABI_ILP32F: 7092 case RISCVABI::ABI_LP64F: 7093 UseGPRForF16_F32 = !IsFixed; 7094 break; 7095 case RISCVABI::ABI_ILP32D: 7096 case RISCVABI::ABI_LP64D: 7097 UseGPRForF16_F32 = !IsFixed; 7098 UseGPRForF64 = !IsFixed; 7099 break; 7100 } 7101 7102 // FPR16, FPR32, and FPR64 alias each other. 7103 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { 7104 UseGPRForF16_F32 = true; 7105 UseGPRForF64 = true; 7106 } 7107 7108 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and 7109 // similar local variables rather than directly checking against the target 7110 // ABI. 7111 7112 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { 7113 LocVT = XLenVT; 7114 LocInfo = CCValAssign::BCvt; 7115 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { 7116 LocVT = MVT::i64; 7117 LocInfo = CCValAssign::BCvt; 7118 } 7119 7120 // If this is a variadic argument, the RISC-V calling convention requires 7121 // that it is assigned an 'even' or 'aligned' register if it has 8-byte 7122 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should 7123 // be used regardless of whether the original argument was split during 7124 // legalisation or not. The argument will not be passed by registers if the 7125 // original type is larger than 2*XLEN, so the register alignment rule does 7126 // not apply. 7127 unsigned TwoXLenInBytes = (2 * XLen) / 8; 7128 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && 7129 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { 7130 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); 7131 // Skip 'odd' register if necessary. 
7132 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) 7133 State.AllocateReg(ArgGPRs); 7134 } 7135 7136 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); 7137 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = 7138 State.getPendingArgFlags(); 7139 7140 assert(PendingLocs.size() == PendingArgFlags.size() && 7141 "PendingLocs and PendingArgFlags out of sync"); 7142 7143 // Handle passing f64 on RV32D with a soft float ABI or when floating point 7144 // registers are exhausted. 7145 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 7146 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 7147 "Can't lower f64 if it is split"); 7148 // Depending on available argument GPRS, f64 may be passed in a pair of 7149 // GPRs, split between a GPR and the stack, or passed completely on the 7150 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 7151 // cases. 7152 Register Reg = State.AllocateReg(ArgGPRs); 7153 LocVT = MVT::i32; 7154 if (!Reg) { 7155 unsigned StackOffset = State.AllocateStack(8, Align(8)); 7156 State.addLoc( 7157 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 7158 return false; 7159 } 7160 if (!State.AllocateReg(ArgGPRs)) 7161 State.AllocateStack(4, Align(4)); 7162 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7163 return false; 7164 } 7165 7166 // Fixed-length vectors are located in the corresponding scalable-vector 7167 // container types. 7168 if (ValVT.isFixedLengthVector()) 7169 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 7170 7171 // Split arguments might be passed indirectly, so keep track of the pending 7172 // values. Split vectors are passed via a mix of registers and indirectly, so 7173 // treat them as we would any other argument. 7174 if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) { 7175 LocVT = XLenVT; 7176 LocInfo = CCValAssign::Indirect; 7177 PendingLocs.push_back( 7178 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 7179 PendingArgFlags.push_back(ArgFlags); 7180 if (!ArgFlags.isSplitEnd()) { 7181 return false; 7182 } 7183 } 7184 7185 // If the split argument only had two elements, it should be passed directly 7186 // in registers or on the stack. 7187 if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() && 7188 PendingLocs.size() <= 2) { 7189 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 7190 // Apply the normal calling convention rules to the first half of the 7191 // split argument. 7192 CCValAssign VA = PendingLocs[0]; 7193 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 7194 PendingLocs.clear(); 7195 PendingArgFlags.clear(); 7196 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 7197 ArgFlags); 7198 } 7199 7200 // Allocate to a register if possible, or else a stack slot. 7201 Register Reg; 7202 unsigned StoreSizeBytes = XLen / 8; 7203 Align StackAlign = Align(XLen / 8); 7204 7205 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 7206 Reg = State.AllocateReg(ArgFPR16s); 7207 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 7208 Reg = State.AllocateReg(ArgFPR32s); 7209 else if (ValVT == MVT::f64 && !UseGPRForF64) 7210 Reg = State.AllocateReg(ArgFPR64s); 7211 else if (ValVT.isVector()) { 7212 Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI); 7213 if (!Reg) { 7214 // For return values, the vector must be passed fully via registers or 7215 // via the stack. 7216 // FIXME: The proposed vector ABI only mandates v8-v15 for return values, 7217 // but we're using all of them. 
7218 if (IsRet) 7219 return true; 7220 // Try using a GPR to pass the address 7221 if ((Reg = State.AllocateReg(ArgGPRs))) { 7222 LocVT = XLenVT; 7223 LocInfo = CCValAssign::Indirect; 7224 } else if (ValVT.isScalableVector()) { 7225 report_fatal_error("Unable to pass scalable vector types on the stack"); 7226 } else { 7227 // Pass fixed-length vectors on the stack. 7228 LocVT = ValVT; 7229 StoreSizeBytes = ValVT.getStoreSize(); 7230 // Align vectors to their element sizes, being careful for vXi1 7231 // vectors. 7232 StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); 7233 } 7234 } 7235 } else { 7236 Reg = State.AllocateReg(ArgGPRs); 7237 } 7238 7239 unsigned StackOffset = 7240 Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign); 7241 7242 // If we reach this point and PendingLocs is non-empty, we must be at the 7243 // end of a split argument that must be passed indirectly. 7244 if (!PendingLocs.empty()) { 7245 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 7246 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 7247 7248 for (auto &It : PendingLocs) { 7249 if (Reg) 7250 It.convertToReg(Reg); 7251 else 7252 It.convertToMem(StackOffset); 7253 State.addLoc(It); 7254 } 7255 PendingLocs.clear(); 7256 PendingArgFlags.clear(); 7257 return false; 7258 } 7259 7260 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || 7261 (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) && 7262 "Expected an XLenVT or vector types at this stage"); 7263 7264 if (Reg) { 7265 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7266 return false; 7267 } 7268 7269 // When a floating-point value is passed on the stack, no bit-conversion is 7270 // needed. 7271 if (ValVT.isFloatingPoint()) { 7272 LocVT = ValVT; 7273 LocInfo = CCValAssign::Full; 7274 } 7275 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 7276 return false; 7277 } 7278 7279 template <typename ArgTy> 7280 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 7281 for (const auto &ArgIdx : enumerate(Args)) { 7282 MVT ArgVT = ArgIdx.value().VT; 7283 if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) 7284 return ArgIdx.index(); 7285 } 7286 return None; 7287 } 7288 7289 void RISCVTargetLowering::analyzeInputArgs( 7290 MachineFunction &MF, CCState &CCInfo, 7291 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet, 7292 RISCVCCAssignFn Fn) const { 7293 unsigned NumArgs = Ins.size(); 7294 FunctionType *FType = MF.getFunction().getFunctionType(); 7295 7296 Optional<unsigned> FirstMaskArgument; 7297 if (Subtarget.hasStdExtV()) 7298 FirstMaskArgument = preAssignMask(Ins); 7299 7300 for (unsigned i = 0; i != NumArgs; ++i) { 7301 MVT ArgVT = Ins[i].VT; 7302 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 7303 7304 Type *ArgTy = nullptr; 7305 if (IsRet) 7306 ArgTy = FType->getReturnType(); 7307 else if (Ins[i].isOrigArg()) 7308 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 7309 7310 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 7311 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 7312 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 7313 FirstMaskArgument)) { 7314 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 7315 << EVT(ArgVT).getEVTString() << '\n'); 7316 llvm_unreachable(nullptr); 7317 } 7318 } 7319 } 7320 7321 void RISCVTargetLowering::analyzeOutputArgs( 7322 MachineFunction &MF, CCState &CCInfo, 7323 const SmallVectorImpl<ISD::OutputArg> &Outs, 
bool IsRet, 7324 CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const { 7325 unsigned NumArgs = Outs.size(); 7326 7327 Optional<unsigned> FirstMaskArgument; 7328 if (Subtarget.hasStdExtV()) 7329 FirstMaskArgument = preAssignMask(Outs); 7330 7331 for (unsigned i = 0; i != NumArgs; i++) { 7332 MVT ArgVT = Outs[i].VT; 7333 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 7334 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 7335 7336 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 7337 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 7338 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 7339 FirstMaskArgument)) { 7340 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 7341 << EVT(ArgVT).getEVTString() << "\n"); 7342 llvm_unreachable(nullptr); 7343 } 7344 } 7345 } 7346 7347 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 7348 // values. 7349 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 7350 const CCValAssign &VA, const SDLoc &DL, 7351 const RISCVSubtarget &Subtarget) { 7352 switch (VA.getLocInfo()) { 7353 default: 7354 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 7355 case CCValAssign::Full: 7356 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector()) 7357 Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget); 7358 break; 7359 case CCValAssign::BCvt: 7360 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 7361 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 7362 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 7363 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 7364 else 7365 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 7366 break; 7367 } 7368 return Val; 7369 } 7370 7371 // The caller is responsible for loading the full value if the argument is 7372 // passed with CCValAssign::Indirect. 
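// For Indirect locations only the XLEN-sized pointer that was passed in the
// register is returned here; LowerFormalArguments performs the actual loads.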
7373 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 7374 const CCValAssign &VA, const SDLoc &DL, 7375 const RISCVTargetLowering &TLI) { 7376 MachineFunction &MF = DAG.getMachineFunction(); 7377 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 7378 EVT LocVT = VA.getLocVT(); 7379 SDValue Val; 7380 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 7381 Register VReg = RegInfo.createVirtualRegister(RC); 7382 RegInfo.addLiveIn(VA.getLocReg(), VReg); 7383 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 7384 7385 if (VA.getLocInfo() == CCValAssign::Indirect) 7386 return Val; 7387 7388 return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget()); 7389 } 7390 7391 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 7392 const CCValAssign &VA, const SDLoc &DL, 7393 const RISCVSubtarget &Subtarget) { 7394 EVT LocVT = VA.getLocVT(); 7395 7396 switch (VA.getLocInfo()) { 7397 default: 7398 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 7399 case CCValAssign::Full: 7400 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector()) 7401 Val = convertToScalableVector(LocVT, Val, DAG, Subtarget); 7402 break; 7403 case CCValAssign::BCvt: 7404 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 7405 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 7406 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 7407 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 7408 else 7409 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 7410 break; 7411 } 7412 return Val; 7413 } 7414 7415 // The caller is responsible for loading the full value if the argument is 7416 // passed with CCValAssign::Indirect. 7417 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain, 7418 const CCValAssign &VA, const SDLoc &DL) { 7419 MachineFunction &MF = DAG.getMachineFunction(); 7420 MachineFrameInfo &MFI = MF.getFrameInfo(); 7421 EVT LocVT = VA.getLocVT(); 7422 EVT ValVT = VA.getValVT(); 7423 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0)); 7424 int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(), 7425 /*Immutable=*/true); 7426 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 7427 SDValue Val; 7428 7429 ISD::LoadExtType ExtType; 7430 switch (VA.getLocInfo()) { 7431 default: 7432 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 7433 case CCValAssign::Full: 7434 case CCValAssign::Indirect: 7435 case CCValAssign::BCvt: 7436 ExtType = ISD::NON_EXTLOAD; 7437 break; 7438 } 7439 Val = DAG.getExtLoad( 7440 ExtType, DL, LocVT, Chain, FIN, 7441 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT); 7442 return Val; 7443 } 7444 7445 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain, 7446 const CCValAssign &VA, const SDLoc &DL) { 7447 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 && 7448 "Unexpected VA"); 7449 MachineFunction &MF = DAG.getMachineFunction(); 7450 MachineFrameInfo &MFI = MF.getFrameInfo(); 7451 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 7452 7453 if (VA.isMemLoc()) { 7454 // f64 is passed on the stack. 
7455     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7456     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7457     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7458                        MachinePointerInfo::getFixedStack(MF, FI));
7459   }
7460
7461   assert(VA.isRegLoc() && "Expected register VA assignment");
7462
7463   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7464   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7465   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7466   SDValue Hi;
7467   if (VA.getLocReg() == RISCV::X17) {
7468     // Second half of f64 is passed on the stack.
7469     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7470     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7471     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7472                      MachinePointerInfo::getFixedStack(MF, FI));
7473   } else {
7474     // Second half of f64 is passed in another GPR.
7475     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7476     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7477     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7478   }
7479   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7480 }
7481
7482 // FastCC gives less than a 1% performance improvement on some particular
7483 // benchmarks, but in theory it may benefit other cases.
7484 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7485                             unsigned ValNo, MVT ValVT, MVT LocVT,
7486                             CCValAssign::LocInfo LocInfo,
7487                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7488                             bool IsFixed, bool IsRet, Type *OrigTy,
7489                             const RISCVTargetLowering &TLI,
7490                             Optional<unsigned> FirstMaskArgument) {
7491
7492   // X5 and X6 might be used by the save/restore libcalls.
7493   static const MCPhysReg GPRList[] = {
7494       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7495       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
7496       RISCV::X29, RISCV::X30, RISCV::X31};
7497
7498   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7499     if (unsigned Reg = State.AllocateReg(GPRList)) {
7500       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7501       return false;
7502     }
7503   }
7504
7505   if (LocVT == MVT::f16) {
7506     static const MCPhysReg FPR16List[] = {
7507         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7508         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
7509         RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
7510         RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7511     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7512       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7513       return false;
7514     }
7515   }
7516
7517   if (LocVT == MVT::f32) {
7518     static const MCPhysReg FPR32List[] = {
7519         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7520         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
7521         RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
7522         RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7523     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7524       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7525       return false;
7526     }
7527   }
7528
7529   if (LocVT == MVT::f64) {
7530     static const MCPhysReg FPR64List[] = {
7531         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7532         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
7533         RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
7534         RISCV::F7_D, RISCV::F28_D, RISCV::F29_D,
RISCV::F30_D, RISCV::F31_D}; 7535 if (unsigned Reg = State.AllocateReg(FPR64List)) { 7536 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7537 return false; 7538 } 7539 } 7540 7541 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 7542 unsigned Offset4 = State.AllocateStack(4, Align(4)); 7543 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 7544 return false; 7545 } 7546 7547 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 7548 unsigned Offset5 = State.AllocateStack(8, Align(8)); 7549 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 7550 return false; 7551 } 7552 7553 if (LocVT.isVector()) { 7554 if (unsigned Reg = 7555 allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) { 7556 // Fixed-length vectors are located in the corresponding scalable-vector 7557 // container types. 7558 if (ValVT.isFixedLengthVector()) 7559 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 7560 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7561 } else { 7562 // Try and pass the address via a "fast" GPR. 7563 if (unsigned GPRReg = State.AllocateReg(GPRList)) { 7564 LocInfo = CCValAssign::Indirect; 7565 LocVT = TLI.getSubtarget().getXLenVT(); 7566 State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo)); 7567 } else if (ValVT.isFixedLengthVector()) { 7568 auto StackAlign = 7569 MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); 7570 unsigned StackOffset = 7571 State.AllocateStack(ValVT.getStoreSize(), StackAlign); 7572 State.addLoc( 7573 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 7574 } else { 7575 // Can't pass scalable vectors on the stack. 7576 return true; 7577 } 7578 } 7579 7580 return false; 7581 } 7582 7583 return true; // CC didn't match. 7584 } 7585 7586 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 7587 CCValAssign::LocInfo LocInfo, 7588 ISD::ArgFlagsTy ArgFlags, CCState &State) { 7589 7590 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 7591 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 7592 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 7593 static const MCPhysReg GPRList[] = { 7594 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 7595 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 7596 if (unsigned Reg = State.AllocateReg(GPRList)) { 7597 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7598 return false; 7599 } 7600 } 7601 7602 if (LocVT == MVT::f32) { 7603 // Pass in STG registers: F1, ..., F6 7604 // fs0 ... fs5 7605 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, 7606 RISCV::F18_F, RISCV::F19_F, 7607 RISCV::F20_F, RISCV::F21_F}; 7608 if (unsigned Reg = State.AllocateReg(FPR32List)) { 7609 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7610 return false; 7611 } 7612 } 7613 7614 if (LocVT == MVT::f64) { 7615 // Pass in STG registers: D1, ..., D6 7616 // fs6 ... fs11 7617 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, 7618 RISCV::F24_D, RISCV::F25_D, 7619 RISCV::F26_D, RISCV::F27_D}; 7620 if (unsigned Reg = State.AllocateReg(FPR64List)) { 7621 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 7622 return false; 7623 } 7624 } 7625 7626 report_fatal_error("No registers left in GHC calling convention"); 7627 return true; 7628 } 7629 7630 // Transform physical registers into virtual registers. 
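// Incoming arguments are assigned locations by CC_RISCV (or CC_RISCV_FastCC /
// CC_RISCV_GHC), copied out of their argument registers or loaded from fixed
// stack objects, and, for vararg functions, any remaining argument GPRs are
// spilled to the vararg save area.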
7631 SDValue RISCVTargetLowering::LowerFormalArguments(
7632     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7633     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7634     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7635
7636   MachineFunction &MF = DAG.getMachineFunction();
7637
7638   switch (CallConv) {
7639   default:
7640     report_fatal_error("Unsupported calling convention");
7641   case CallingConv::C:
7642   case CallingConv::Fast:
7643     break;
7644   case CallingConv::GHC:
7645     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7646         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7647       report_fatal_error(
7648           "GHC calling convention requires the F and D instruction set extensions");
7649   }
7650
7651   const Function &Func = MF.getFunction();
7652   if (Func.hasFnAttribute("interrupt")) {
7653     if (!Func.arg_empty())
7654       report_fatal_error(
7655           "Functions with the interrupt attribute cannot have arguments!");
7656
7657     StringRef Kind =
7658         MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7659
7660     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7661       report_fatal_error(
7662           "Function interrupt attribute argument not supported!");
7663   }
7664
7665   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7666   MVT XLenVT = Subtarget.getXLenVT();
7667   unsigned XLenInBytes = Subtarget.getXLen() / 8;
7668   // Used with varargs to accumulate store chains.
7669   std::vector<SDValue> OutChains;
7670
7671   // Assign locations to all of the incoming arguments.
7672   SmallVector<CCValAssign, 16> ArgLocs;
7673   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7674
7675   if (CallConv == CallingConv::GHC)
7676     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7677   else
7678     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7679                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7680                                                    : CC_RISCV);
7681
7682   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7683     CCValAssign &VA = ArgLocs[i];
7684     SDValue ArgValue;
7685     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7686     // case.
7687     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7688       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7689     else if (VA.isRegLoc())
7690       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7691     else
7692       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7693
7694     if (VA.getLocInfo() == CCValAssign::Indirect) {
7695       // If the original argument was split and passed by reference (e.g. i128
7696       // on RV32), we need to load all parts of it here (using the same
7697       // address). Vectors may be partly split to registers and partly to the
7698       // stack, in which case the base address is partly offset and subsequent
7699       // stores are relative to that.
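      // The first load below fetches the part located at the base address;
      // the loop that follows loads any remaining parts of the same original
      // argument at their recorded part offsets (scaled by VSCALE for
      // scalable-vector parts).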
7700       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7701                                    MachinePointerInfo()));
7702       unsigned ArgIndex = Ins[i].OrigArgIndex;
7703       unsigned ArgPartOffset = Ins[i].PartOffset;
7704       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7705       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7706         CCValAssign &PartVA = ArgLocs[i + 1];
7707         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7708         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7709         if (PartVA.getValVT().isScalableVector())
7710           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7711         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7712         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7713                                      MachinePointerInfo()));
7714         ++i;
7715       }
7716       continue;
7717     }
7718     InVals.push_back(ArgValue);
7719   }
7720
7721   if (IsVarArg) {
7722     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7723     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7724     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7725     MachineFrameInfo &MFI = MF.getFrameInfo();
7726     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7727     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7728
7729     // Offset of the first variable argument from stack pointer, and size of
7730     // the vararg save area. For now, the varargs save area is either zero or
7731     // large enough to hold a0-a7.
7732     int VaArgOffset, VarArgsSaveSize;
7733
7734     // If all registers are allocated, then all varargs must be passed on the
7735     // stack and we don't need to save any argregs.
7736     if (ArgRegs.size() == Idx) {
7737       VaArgOffset = CCInfo.getNextStackOffset();
7738       VarArgsSaveSize = 0;
7739     } else {
7740       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7741       VaArgOffset = -VarArgsSaveSize;
7742     }
7743
7744     // Record the frame index of the first variable argument,
7745     // which is a value needed by VASTART.
7746     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7747     RVFI->setVarArgsFrameIndex(FI);
7748
7749     // If saving an odd number of registers then create an extra stack slot to
7750     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
7751     // offsets to even-numbered registers remain 2*XLEN-aligned.
7752     if (Idx % 2) {
7753       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7754       VarArgsSaveSize += XLenInBytes;
7755     }
7756
7757     // Copy the integer registers that may have been used for passing varargs
7758     // to the vararg save area.
7759     for (unsigned I = Idx; I < ArgRegs.size();
7760          ++I, VaArgOffset += XLenInBytes) {
7761       const Register Reg = RegInfo.createVirtualRegister(RC);
7762       RegInfo.addLiveIn(ArgRegs[I], Reg);
7763       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7764       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7765       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7766       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7767                                    MachinePointerInfo::getFixedStack(MF, FI));
7768       cast<StoreSDNode>(Store.getNode())
7769           ->getMemOperand()
7770           ->setValue((Value *)nullptr);
7771       OutChains.push_back(Store);
7772     }
7773     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7774   }
7775
7776   // All stores are grouped in one node to allow the matching between
7777   // the size of Ins and InVals. This only happens for vararg functions.
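  // The TokenFactor below merges those stores with the incoming chain so that
  // later uses of the chain are ordered after them.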
7778 if (!OutChains.empty()) { 7779 OutChains.push_back(Chain); 7780 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains); 7781 } 7782 7783 return Chain; 7784 } 7785 7786 /// isEligibleForTailCallOptimization - Check whether the call is eligible 7787 /// for tail call optimization. 7788 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. 7789 bool RISCVTargetLowering::isEligibleForTailCallOptimization( 7790 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, 7791 const SmallVector<CCValAssign, 16> &ArgLocs) const { 7792 7793 auto &Callee = CLI.Callee; 7794 auto CalleeCC = CLI.CallConv; 7795 auto &Outs = CLI.Outs; 7796 auto &Caller = MF.getFunction(); 7797 auto CallerCC = Caller.getCallingConv(); 7798 7799 // Exception-handling functions need a special set of instructions to 7800 // indicate a return to the hardware. Tail-calling another function would 7801 // probably break this. 7802 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This 7803 // should be expanded as new function attributes are introduced. 7804 if (Caller.hasFnAttribute("interrupt")) 7805 return false; 7806 7807 // Do not tail call opt if the stack is used to pass parameters. 7808 if (CCInfo.getNextStackOffset() != 0) 7809 return false; 7810 7811 // Do not tail call opt if any parameters need to be passed indirectly. 7812 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 7813 // passed indirectly. So the address of the value will be passed in a 7814 // register, or if not available, then the address is put on the stack. In 7815 // order to pass indirectly, space on the stack often needs to be allocated 7816 // in order to store the value. In this case the CCInfo.getNextStackOffset() 7817 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 7818 // are passed CCValAssign::Indirect. 7819 for (auto &VA : ArgLocs) 7820 if (VA.getLocInfo() == CCValAssign::Indirect) 7821 return false; 7822 7823 // Do not tail call opt if either caller or callee uses struct return 7824 // semantics. 7825 auto IsCallerStructRet = Caller.hasStructRetAttr(); 7826 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 7827 if (IsCallerStructRet || IsCalleeStructRet) 7828 return false; 7829 7830 // Externally-defined functions with weak linkage should not be 7831 // tail-called. The behaviour of branch instructions in this situation (as 7832 // used for tail calls) is implementation-defined, so we cannot rely on the 7833 // linker replacing the tail call with a return. 7834 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 7835 const GlobalValue *GV = G->getGlobal(); 7836 if (GV->hasExternalWeakLinkage()) 7837 return false; 7838 } 7839 7840 // The callee has to preserve all registers the caller needs to preserve. 7841 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 7842 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 7843 if (CalleeCC != CallerCC) { 7844 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 7845 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 7846 return false; 7847 } 7848 7849 // Byval parameters hand the function a pointer directly into the stack area 7850 // we want to reuse during a tail call. Working around this *is* possible 7851 // but less efficient and uglier in LowerCall. 
7852 for (auto &Arg : Outs) 7853 if (Arg.Flags.isByVal()) 7854 return false; 7855 7856 return true; 7857 } 7858 7859 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) { 7860 return DAG.getDataLayout().getPrefTypeAlign( 7861 VT.getTypeForEVT(*DAG.getContext())); 7862 } 7863 7864 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 7865 // and output parameter nodes. 7866 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 7867 SmallVectorImpl<SDValue> &InVals) const { 7868 SelectionDAG &DAG = CLI.DAG; 7869 SDLoc &DL = CLI.DL; 7870 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 7871 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 7872 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 7873 SDValue Chain = CLI.Chain; 7874 SDValue Callee = CLI.Callee; 7875 bool &IsTailCall = CLI.IsTailCall; 7876 CallingConv::ID CallConv = CLI.CallConv; 7877 bool IsVarArg = CLI.IsVarArg; 7878 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7879 MVT XLenVT = Subtarget.getXLenVT(); 7880 7881 MachineFunction &MF = DAG.getMachineFunction(); 7882 7883 // Analyze the operands of the call, assigning locations to each operand. 7884 SmallVector<CCValAssign, 16> ArgLocs; 7885 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 7886 7887 if (CallConv == CallingConv::GHC) 7888 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 7889 else 7890 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI, 7891 CallConv == CallingConv::Fast ? CC_RISCV_FastCC 7892 : CC_RISCV); 7893 7894 // Check if it's really possible to do a tail call. 7895 if (IsTailCall) 7896 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 7897 7898 if (IsTailCall) 7899 ++NumTailCalls; 7900 else if (CLI.CB && CLI.CB->isMustTailCall()) 7901 report_fatal_error("failed to perform tail call elimination on a call " 7902 "site marked musttail"); 7903 7904 // Get a count of how many bytes are to be pushed on the stack. 7905 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 7906 7907 // Create local copies for byval args 7908 SmallVector<SDValue, 8> ByValArgs; 7909 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 7910 ISD::ArgFlagsTy Flags = Outs[i].Flags; 7911 if (!Flags.isByVal()) 7912 continue; 7913 7914 SDValue Arg = OutVals[i]; 7915 unsigned Size = Flags.getByValSize(); 7916 Align Alignment = Flags.getNonZeroByValAlign(); 7917 7918 int FI = 7919 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 7920 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 7921 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 7922 7923 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 7924 /*IsVolatile=*/false, 7925 /*AlwaysInline=*/false, IsTailCall, 7926 MachinePointerInfo(), MachinePointerInfo()); 7927 ByValArgs.push_back(FIPtr); 7928 } 7929 7930 if (!IsTailCall) 7931 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 7932 7933 // Copy argument values to their designated locations. 7934 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 7935 SmallVector<SDValue, 8> MemOpChains; 7936 SDValue StackPtr; 7937 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 7938 CCValAssign &VA = ArgLocs[i]; 7939 SDValue ArgValue = OutVals[i]; 7940 ISD::ArgFlagsTy Flags = Outs[i].Flags; 7941 7942 // Handle passing f64 on RV32D with a soft float ABI as a special case. 
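    // SplitF64 yields the two i32 halves: the low half goes in the allocated
    // GPR and the high half either in the next GPR or, if the low half landed
    // in a7 (X17), on the stack.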
7943 bool IsF64OnRV32DSoftABI = 7944 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 7945 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 7946 SDValue SplitF64 = DAG.getNode( 7947 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 7948 SDValue Lo = SplitF64.getValue(0); 7949 SDValue Hi = SplitF64.getValue(1); 7950 7951 Register RegLo = VA.getLocReg(); 7952 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 7953 7954 if (RegLo == RISCV::X17) { 7955 // Second half of f64 is passed on the stack. 7956 // Work out the address of the stack slot. 7957 if (!StackPtr.getNode()) 7958 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 7959 // Emit the store. 7960 MemOpChains.push_back( 7961 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 7962 } else { 7963 // Second half of f64 is passed in another GPR. 7964 assert(RegLo < RISCV::X31 && "Invalid register pair"); 7965 Register RegHigh = RegLo + 1; 7966 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 7967 } 7968 continue; 7969 } 7970 7971 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 7972 // as any other MemLoc. 7973 7974 // Promote the value if needed. 7975 // For now, only handle fully promoted and indirect arguments. 7976 if (VA.getLocInfo() == CCValAssign::Indirect) { 7977 // Store the argument in a stack slot and pass its address. 7978 Align StackAlign = 7979 std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG), 7980 getPrefTypeAlign(ArgValue.getValueType(), DAG)); 7981 TypeSize StoredSize = ArgValue.getValueType().getStoreSize(); 7982 // If the original argument was split (e.g. i128), we need 7983 // to store the required parts of it here (and pass just one address). 7984 // Vectors may be partly split to registers and partly to the stack, in 7985 // which case the base address is partly offset and subsequent stores are 7986 // relative to that. 7987 unsigned ArgIndex = Outs[i].OrigArgIndex; 7988 unsigned ArgPartOffset = Outs[i].PartOffset; 7989 assert(VA.getValVT().isVector() || ArgPartOffset == 0); 7990 // Calculate the total size to store. We don't have access to what we're 7991 // actually storing other than performing the loop and collecting the 7992 // info. 
7993 SmallVector<std::pair<SDValue, SDValue>> Parts; 7994 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 7995 SDValue PartValue = OutVals[i + 1]; 7996 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset; 7997 SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL); 7998 EVT PartVT = PartValue.getValueType(); 7999 if (PartVT.isScalableVector()) 8000 Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset); 8001 StoredSize += PartVT.getStoreSize(); 8002 StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG)); 8003 Parts.push_back(std::make_pair(PartValue, Offset)); 8004 ++i; 8005 } 8006 SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign); 8007 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 8008 MemOpChains.push_back( 8009 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 8010 MachinePointerInfo::getFixedStack(MF, FI))); 8011 for (const auto &Part : Parts) { 8012 SDValue PartValue = Part.first; 8013 SDValue PartOffset = Part.second; 8014 SDValue Address = 8015 DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset); 8016 MemOpChains.push_back( 8017 DAG.getStore(Chain, DL, PartValue, Address, 8018 MachinePointerInfo::getFixedStack(MF, FI))); 8019 } 8020 ArgValue = SpillSlot; 8021 } else { 8022 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget); 8023 } 8024 8025 // Use local copy if it is a byval arg. 8026 if (Flags.isByVal()) 8027 ArgValue = ByValArgs[j++]; 8028 8029 if (VA.isRegLoc()) { 8030 // Queue up the argument copies and emit them at the end. 8031 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 8032 } else { 8033 assert(VA.isMemLoc() && "Argument not register or memory"); 8034 assert(!IsTailCall && "Tail call not allowed if stack is used " 8035 "for passing parameters"); 8036 8037 // Work out the address of the stack slot. 8038 if (!StackPtr.getNode()) 8039 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 8040 SDValue Address = 8041 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 8042 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 8043 8044 // Emit the store. 8045 MemOpChains.push_back( 8046 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 8047 } 8048 } 8049 8050 // Join the stores, which are independent of one another. 8051 if (!MemOpChains.empty()) 8052 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 8053 8054 SDValue Glue; 8055 8056 // Build a sequence of copy-to-reg nodes, chained and glued together. 8057 for (auto &Reg : RegsToPass) { 8058 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 8059 Glue = Chain.getValue(1); 8060 } 8061 8062 // Validate that none of the argument registers have been marked as 8063 // reserved, if so report an error. Do the same for the return address if this 8064 // is not a tailcall. 8065 validateCCReservedRegs(RegsToPass, MF); 8066 if (!IsTailCall && 8067 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 8068 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 8069 MF.getFunction(), 8070 "Return address register required, but has been reserved."}); 8071 8072 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 8073 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 8074 // split it and then direct call can be matched by PseudoCALL. 
8075 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 8076 const GlobalValue *GV = S->getGlobal(); 8077 8078 unsigned OpFlags = RISCVII::MO_CALL; 8079 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 8080 OpFlags = RISCVII::MO_PLT; 8081 8082 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 8083 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 8084 unsigned OpFlags = RISCVII::MO_CALL; 8085 8086 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 8087 nullptr)) 8088 OpFlags = RISCVII::MO_PLT; 8089 8090 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 8091 } 8092 8093 // The first call operand is the chain and the second is the target address. 8094 SmallVector<SDValue, 8> Ops; 8095 Ops.push_back(Chain); 8096 Ops.push_back(Callee); 8097 8098 // Add argument registers to the end of the list so that they are 8099 // known live into the call. 8100 for (auto &Reg : RegsToPass) 8101 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 8102 8103 if (!IsTailCall) { 8104 // Add a register mask operand representing the call-preserved registers. 8105 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 8106 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 8107 assert(Mask && "Missing call preserved mask for calling convention"); 8108 Ops.push_back(DAG.getRegisterMask(Mask)); 8109 } 8110 8111 // Glue the call to the argument copies, if any. 8112 if (Glue.getNode()) 8113 Ops.push_back(Glue); 8114 8115 // Emit the call. 8116 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 8117 8118 if (IsTailCall) { 8119 MF.getFrameInfo().setHasTailCall(); 8120 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 8121 } 8122 8123 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 8124 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 8125 Glue = Chain.getValue(1); 8126 8127 // Mark the end of the call, which is glued to the call itself. 8128 Chain = DAG.getCALLSEQ_END(Chain, 8129 DAG.getConstant(NumBytes, DL, PtrVT, true), 8130 DAG.getConstant(0, DL, PtrVT, true), 8131 Glue, DL); 8132 Glue = Chain.getValue(1); 8133 8134 // Assign locations to each value returned by this call. 8135 SmallVector<CCValAssign, 16> RVLocs; 8136 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 8137 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV); 8138 8139 // Copy all of the result registers out of their specified physreg. 
8140 for (auto &VA : RVLocs) { 8141 // Copy the value out 8142 SDValue RetValue = 8143 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 8144 // Glue the RetValue to the end of the call sequence 8145 Chain = RetValue.getValue(1); 8146 Glue = RetValue.getValue(2); 8147 8148 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 8149 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 8150 SDValue RetValue2 = 8151 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 8152 Chain = RetValue2.getValue(1); 8153 Glue = RetValue2.getValue(2); 8154 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 8155 RetValue2); 8156 } 8157 8158 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget); 8159 8160 InVals.push_back(RetValue); 8161 } 8162 8163 return Chain; 8164 } 8165 8166 bool RISCVTargetLowering::CanLowerReturn( 8167 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 8168 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 8169 SmallVector<CCValAssign, 16> RVLocs; 8170 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 8171 8172 Optional<unsigned> FirstMaskArgument; 8173 if (Subtarget.hasStdExtV()) 8174 FirstMaskArgument = preAssignMask(Outs); 8175 8176 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 8177 MVT VT = Outs[i].VT; 8178 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 8179 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 8180 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 8181 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 8182 *this, FirstMaskArgument)) 8183 return false; 8184 } 8185 return true; 8186 } 8187 8188 SDValue 8189 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 8190 bool IsVarArg, 8191 const SmallVectorImpl<ISD::OutputArg> &Outs, 8192 const SmallVectorImpl<SDValue> &OutVals, 8193 const SDLoc &DL, SelectionDAG &DAG) const { 8194 const MachineFunction &MF = DAG.getMachineFunction(); 8195 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 8196 8197 // Stores the assignment of the return value to a location. 8198 SmallVector<CCValAssign, 16> RVLocs; 8199 8200 // Info about the registers and stack slot. 8201 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 8202 *DAG.getContext()); 8203 8204 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 8205 nullptr, CC_RISCV); 8206 8207 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 8208 report_fatal_error("GHC functions return void only"); 8209 8210 SDValue Glue; 8211 SmallVector<SDValue, 4> RetOps(1, Chain); 8212 8213 // Copy the result values into the output registers. 8214 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 8215 SDValue Val = OutVals[i]; 8216 CCValAssign &VA = RVLocs[i]; 8217 assert(VA.isRegLoc() && "Can only return in registers!"); 8218 8219 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 8220 // Handle returning f64 on RV32D with a soft float ABI. 
8221 assert(VA.isRegLoc() && "Expected return via registers"); 8222 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 8223 DAG.getVTList(MVT::i32, MVT::i32), Val); 8224 SDValue Lo = SplitF64.getValue(0); 8225 SDValue Hi = SplitF64.getValue(1); 8226 Register RegLo = VA.getLocReg(); 8227 assert(RegLo < RISCV::X31 && "Invalid register pair"); 8228 Register RegHi = RegLo + 1; 8229 8230 if (STI.isRegisterReservedByUser(RegLo) || 8231 STI.isRegisterReservedByUser(RegHi)) 8232 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 8233 MF.getFunction(), 8234 "Return value register required, but has been reserved."}); 8235 8236 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 8237 Glue = Chain.getValue(1); 8238 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 8239 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 8240 Glue = Chain.getValue(1); 8241 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 8242 } else { 8243 // Handle a 'normal' return. 8244 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget); 8245 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 8246 8247 if (STI.isRegisterReservedByUser(VA.getLocReg())) 8248 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 8249 MF.getFunction(), 8250 "Return value register required, but has been reserved."}); 8251 8252 // Guarantee that all emitted copies are stuck together. 8253 Glue = Chain.getValue(1); 8254 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 8255 } 8256 } 8257 8258 RetOps[0] = Chain; // Update chain. 8259 8260 // Add the glue node if we have it. 8261 if (Glue.getNode()) { 8262 RetOps.push_back(Glue); 8263 } 8264 8265 unsigned RetOpc = RISCVISD::RET_FLAG; 8266 // Interrupt service routines use different return instructions. 
8267 const Function &Func = DAG.getMachineFunction().getFunction(); 8268 if (Func.hasFnAttribute("interrupt")) { 8269 if (!Func.getReturnType()->isVoidTy()) 8270 report_fatal_error( 8271 "Functions with the interrupt attribute must have void return type!"); 8272 8273 MachineFunction &MF = DAG.getMachineFunction(); 8274 StringRef Kind = 8275 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 8276 8277 if (Kind == "user") 8278 RetOpc = RISCVISD::URET_FLAG; 8279 else if (Kind == "supervisor") 8280 RetOpc = RISCVISD::SRET_FLAG; 8281 else 8282 RetOpc = RISCVISD::MRET_FLAG; 8283 } 8284 8285 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 8286 } 8287 8288 void RISCVTargetLowering::validateCCReservedRegs( 8289 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 8290 MachineFunction &MF) const { 8291 const Function &F = MF.getFunction(); 8292 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 8293 8294 if (llvm::any_of(Regs, [&STI](auto Reg) { 8295 return STI.isRegisterReservedByUser(Reg.first); 8296 })) 8297 F.getContext().diagnose(DiagnosticInfoUnsupported{ 8298 F, "Argument register required, but has been reserved."}); 8299 } 8300 8301 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 8302 return CI->isTailCall(); 8303 } 8304 8305 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 8306 #define NODE_NAME_CASE(NODE) \ 8307 case RISCVISD::NODE: \ 8308 return "RISCVISD::" #NODE; 8309 // clang-format off 8310 switch ((RISCVISD::NodeType)Opcode) { 8311 case RISCVISD::FIRST_NUMBER: 8312 break; 8313 NODE_NAME_CASE(RET_FLAG) 8314 NODE_NAME_CASE(URET_FLAG) 8315 NODE_NAME_CASE(SRET_FLAG) 8316 NODE_NAME_CASE(MRET_FLAG) 8317 NODE_NAME_CASE(CALL) 8318 NODE_NAME_CASE(SELECT_CC) 8319 NODE_NAME_CASE(BR_CC) 8320 NODE_NAME_CASE(BuildPairF64) 8321 NODE_NAME_CASE(SplitF64) 8322 NODE_NAME_CASE(TAIL) 8323 NODE_NAME_CASE(MULHSU) 8324 NODE_NAME_CASE(SLLW) 8325 NODE_NAME_CASE(SRAW) 8326 NODE_NAME_CASE(SRLW) 8327 NODE_NAME_CASE(DIVW) 8328 NODE_NAME_CASE(DIVUW) 8329 NODE_NAME_CASE(REMUW) 8330 NODE_NAME_CASE(ROLW) 8331 NODE_NAME_CASE(RORW) 8332 NODE_NAME_CASE(CLZW) 8333 NODE_NAME_CASE(CTZW) 8334 NODE_NAME_CASE(FSLW) 8335 NODE_NAME_CASE(FSRW) 8336 NODE_NAME_CASE(FSL) 8337 NODE_NAME_CASE(FSR) 8338 NODE_NAME_CASE(FMV_H_X) 8339 NODE_NAME_CASE(FMV_X_ANYEXTH) 8340 NODE_NAME_CASE(FMV_W_X_RV64) 8341 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 8342 NODE_NAME_CASE(FCVT_W_RV64) 8343 NODE_NAME_CASE(FCVT_WU_RV64) 8344 NODE_NAME_CASE(READ_CYCLE_WIDE) 8345 NODE_NAME_CASE(GREV) 8346 NODE_NAME_CASE(GREVW) 8347 NODE_NAME_CASE(GORC) 8348 NODE_NAME_CASE(GORCW) 8349 NODE_NAME_CASE(SHFL) 8350 NODE_NAME_CASE(SHFLW) 8351 NODE_NAME_CASE(UNSHFL) 8352 NODE_NAME_CASE(UNSHFLW) 8353 NODE_NAME_CASE(BCOMPRESS) 8354 NODE_NAME_CASE(BCOMPRESSW) 8355 NODE_NAME_CASE(BDECOMPRESS) 8356 NODE_NAME_CASE(BDECOMPRESSW) 8357 NODE_NAME_CASE(VMV_V_X_VL) 8358 NODE_NAME_CASE(VFMV_V_F_VL) 8359 NODE_NAME_CASE(VMV_X_S) 8360 NODE_NAME_CASE(VMV_S_X_VL) 8361 NODE_NAME_CASE(VFMV_S_F_VL) 8362 NODE_NAME_CASE(SPLAT_VECTOR_I64) 8363 NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL) 8364 NODE_NAME_CASE(READ_VLENB) 8365 NODE_NAME_CASE(TRUNCATE_VECTOR_VL) 8366 NODE_NAME_CASE(VSLIDEUP_VL) 8367 NODE_NAME_CASE(VSLIDE1UP_VL) 8368 NODE_NAME_CASE(VSLIDEDOWN_VL) 8369 NODE_NAME_CASE(VSLIDE1DOWN_VL) 8370 NODE_NAME_CASE(VID_VL) 8371 NODE_NAME_CASE(VFNCVT_ROD_VL) 8372 NODE_NAME_CASE(VECREDUCE_ADD_VL) 8373 NODE_NAME_CASE(VECREDUCE_UMAX_VL) 8374 NODE_NAME_CASE(VECREDUCE_SMAX_VL) 8375 
NODE_NAME_CASE(VECREDUCE_UMIN_VL) 8376 NODE_NAME_CASE(VECREDUCE_SMIN_VL) 8377 NODE_NAME_CASE(VECREDUCE_AND_VL) 8378 NODE_NAME_CASE(VECREDUCE_OR_VL) 8379 NODE_NAME_CASE(VECREDUCE_XOR_VL) 8380 NODE_NAME_CASE(VECREDUCE_FADD_VL) 8381 NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL) 8382 NODE_NAME_CASE(VECREDUCE_FMIN_VL) 8383 NODE_NAME_CASE(VECREDUCE_FMAX_VL) 8384 NODE_NAME_CASE(ADD_VL) 8385 NODE_NAME_CASE(AND_VL) 8386 NODE_NAME_CASE(MUL_VL) 8387 NODE_NAME_CASE(OR_VL) 8388 NODE_NAME_CASE(SDIV_VL) 8389 NODE_NAME_CASE(SHL_VL) 8390 NODE_NAME_CASE(SREM_VL) 8391 NODE_NAME_CASE(SRA_VL) 8392 NODE_NAME_CASE(SRL_VL) 8393 NODE_NAME_CASE(SUB_VL) 8394 NODE_NAME_CASE(UDIV_VL) 8395 NODE_NAME_CASE(UREM_VL) 8396 NODE_NAME_CASE(XOR_VL) 8397 NODE_NAME_CASE(SADDSAT_VL) 8398 NODE_NAME_CASE(UADDSAT_VL) 8399 NODE_NAME_CASE(SSUBSAT_VL) 8400 NODE_NAME_CASE(USUBSAT_VL) 8401 NODE_NAME_CASE(FADD_VL) 8402 NODE_NAME_CASE(FSUB_VL) 8403 NODE_NAME_CASE(FMUL_VL) 8404 NODE_NAME_CASE(FDIV_VL) 8405 NODE_NAME_CASE(FNEG_VL) 8406 NODE_NAME_CASE(FABS_VL) 8407 NODE_NAME_CASE(FSQRT_VL) 8408 NODE_NAME_CASE(FMA_VL) 8409 NODE_NAME_CASE(FCOPYSIGN_VL) 8410 NODE_NAME_CASE(SMIN_VL) 8411 NODE_NAME_CASE(SMAX_VL) 8412 NODE_NAME_CASE(UMIN_VL) 8413 NODE_NAME_CASE(UMAX_VL) 8414 NODE_NAME_CASE(FMINNUM_VL) 8415 NODE_NAME_CASE(FMAXNUM_VL) 8416 NODE_NAME_CASE(MULHS_VL) 8417 NODE_NAME_CASE(MULHU_VL) 8418 NODE_NAME_CASE(FP_TO_SINT_VL) 8419 NODE_NAME_CASE(FP_TO_UINT_VL) 8420 NODE_NAME_CASE(SINT_TO_FP_VL) 8421 NODE_NAME_CASE(UINT_TO_FP_VL) 8422 NODE_NAME_CASE(FP_EXTEND_VL) 8423 NODE_NAME_CASE(FP_ROUND_VL) 8424 NODE_NAME_CASE(VWMUL_VL) 8425 NODE_NAME_CASE(VWMULU_VL) 8426 NODE_NAME_CASE(SETCC_VL) 8427 NODE_NAME_CASE(VSELECT_VL) 8428 NODE_NAME_CASE(VMAND_VL) 8429 NODE_NAME_CASE(VMOR_VL) 8430 NODE_NAME_CASE(VMXOR_VL) 8431 NODE_NAME_CASE(VMCLR_VL) 8432 NODE_NAME_CASE(VMSET_VL) 8433 NODE_NAME_CASE(VRGATHER_VX_VL) 8434 NODE_NAME_CASE(VRGATHER_VV_VL) 8435 NODE_NAME_CASE(VRGATHEREI16_VV_VL) 8436 NODE_NAME_CASE(VSEXT_VL) 8437 NODE_NAME_CASE(VZEXT_VL) 8438 NODE_NAME_CASE(VPOPC_VL) 8439 NODE_NAME_CASE(VLE_VL) 8440 NODE_NAME_CASE(VSE_VL) 8441 NODE_NAME_CASE(READ_CSR) 8442 NODE_NAME_CASE(WRITE_CSR) 8443 NODE_NAME_CASE(SWAP_CSR) 8444 } 8445 // clang-format on 8446 return nullptr; 8447 #undef NODE_NAME_CASE 8448 } 8449 8450 /// getConstraintType - Given a constraint letter, return the type of 8451 /// constraint it is for this target. 8452 RISCVTargetLowering::ConstraintType 8453 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 8454 if (Constraint.size() == 1) { 8455 switch (Constraint[0]) { 8456 default: 8457 break; 8458 case 'f': 8459 case 'v': 8460 return C_RegisterClass; 8461 case 'I': 8462 case 'J': 8463 case 'K': 8464 return C_Immediate; 8465 case 'A': 8466 return C_Memory; 8467 case 'S': // A symbolic address 8468 return C_Other; 8469 } 8470 } 8471 return TargetLowering::getConstraintType(Constraint); 8472 } 8473 8474 std::pair<unsigned, const TargetRegisterClass *> 8475 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 8476 StringRef Constraint, 8477 MVT VT) const { 8478 // First, see if this is a constraint that directly corresponds to a 8479 // RISCV register class. 
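  // For example, 'r' selects a GPR, 'f' the FP register class matching the
  // value type, and 'v' the first vector register class for which the type is
  // legal.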
8480 if (Constraint.size() == 1) { 8481 switch (Constraint[0]) { 8482 case 'r': 8483 return std::make_pair(0U, &RISCV::GPRRegClass); 8484 case 'f': 8485 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) 8486 return std::make_pair(0U, &RISCV::FPR16RegClass); 8487 if (Subtarget.hasStdExtF() && VT == MVT::f32) 8488 return std::make_pair(0U, &RISCV::FPR32RegClass); 8489 if (Subtarget.hasStdExtD() && VT == MVT::f64) 8490 return std::make_pair(0U, &RISCV::FPR64RegClass); 8491 break; 8492 case 'v': 8493 for (const auto *RC : 8494 {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass, 8495 &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 8496 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) 8497 return std::make_pair(0U, RC); 8498 } 8499 break; 8500 default: 8501 break; 8502 } 8503 } 8504 8505 // Clang will correctly decode the usage of register name aliases into their 8506 // official names. However, other frontends like `rustc` do not. This allows 8507 // users of these frontends to use the ABI names for registers in LLVM-style 8508 // register constraints. 8509 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) 8510 .Case("{zero}", RISCV::X0) 8511 .Case("{ra}", RISCV::X1) 8512 .Case("{sp}", RISCV::X2) 8513 .Case("{gp}", RISCV::X3) 8514 .Case("{tp}", RISCV::X4) 8515 .Case("{t0}", RISCV::X5) 8516 .Case("{t1}", RISCV::X6) 8517 .Case("{t2}", RISCV::X7) 8518 .Cases("{s0}", "{fp}", RISCV::X8) 8519 .Case("{s1}", RISCV::X9) 8520 .Case("{a0}", RISCV::X10) 8521 .Case("{a1}", RISCV::X11) 8522 .Case("{a2}", RISCV::X12) 8523 .Case("{a3}", RISCV::X13) 8524 .Case("{a4}", RISCV::X14) 8525 .Case("{a5}", RISCV::X15) 8526 .Case("{a6}", RISCV::X16) 8527 .Case("{a7}", RISCV::X17) 8528 .Case("{s2}", RISCV::X18) 8529 .Case("{s3}", RISCV::X19) 8530 .Case("{s4}", RISCV::X20) 8531 .Case("{s5}", RISCV::X21) 8532 .Case("{s6}", RISCV::X22) 8533 .Case("{s7}", RISCV::X23) 8534 .Case("{s8}", RISCV::X24) 8535 .Case("{s9}", RISCV::X25) 8536 .Case("{s10}", RISCV::X26) 8537 .Case("{s11}", RISCV::X27) 8538 .Case("{t3}", RISCV::X28) 8539 .Case("{t4}", RISCV::X29) 8540 .Case("{t5}", RISCV::X30) 8541 .Case("{t6}", RISCV::X31) 8542 .Default(RISCV::NoRegister); 8543 if (XRegFromAlias != RISCV::NoRegister) 8544 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 8545 8546 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 8547 // TableGen record rather than the AsmName to choose registers for InlineAsm 8548 // constraints, plus we want to match those names to the widest floating point 8549 // register type available, manually select floating point registers here. 8550 // 8551 // The second case is the ABI name of the register, so that frontends can also 8552 // use the ABI names in register constraint lists. 
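  // For example, both "{f10}" and "{fa0}" resolve to F10, widened to the
  // 64-bit register when the D extension is available.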
8553 if (Subtarget.hasStdExtF()) { 8554 unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) 8555 .Cases("{f0}", "{ft0}", RISCV::F0_F) 8556 .Cases("{f1}", "{ft1}", RISCV::F1_F) 8557 .Cases("{f2}", "{ft2}", RISCV::F2_F) 8558 .Cases("{f3}", "{ft3}", RISCV::F3_F) 8559 .Cases("{f4}", "{ft4}", RISCV::F4_F) 8560 .Cases("{f5}", "{ft5}", RISCV::F5_F) 8561 .Cases("{f6}", "{ft6}", RISCV::F6_F) 8562 .Cases("{f7}", "{ft7}", RISCV::F7_F) 8563 .Cases("{f8}", "{fs0}", RISCV::F8_F) 8564 .Cases("{f9}", "{fs1}", RISCV::F9_F) 8565 .Cases("{f10}", "{fa0}", RISCV::F10_F) 8566 .Cases("{f11}", "{fa1}", RISCV::F11_F) 8567 .Cases("{f12}", "{fa2}", RISCV::F12_F) 8568 .Cases("{f13}", "{fa3}", RISCV::F13_F) 8569 .Cases("{f14}", "{fa4}", RISCV::F14_F) 8570 .Cases("{f15}", "{fa5}", RISCV::F15_F) 8571 .Cases("{f16}", "{fa6}", RISCV::F16_F) 8572 .Cases("{f17}", "{fa7}", RISCV::F17_F) 8573 .Cases("{f18}", "{fs2}", RISCV::F18_F) 8574 .Cases("{f19}", "{fs3}", RISCV::F19_F) 8575 .Cases("{f20}", "{fs4}", RISCV::F20_F) 8576 .Cases("{f21}", "{fs5}", RISCV::F21_F) 8577 .Cases("{f22}", "{fs6}", RISCV::F22_F) 8578 .Cases("{f23}", "{fs7}", RISCV::F23_F) 8579 .Cases("{f24}", "{fs8}", RISCV::F24_F) 8580 .Cases("{f25}", "{fs9}", RISCV::F25_F) 8581 .Cases("{f26}", "{fs10}", RISCV::F26_F) 8582 .Cases("{f27}", "{fs11}", RISCV::F27_F) 8583 .Cases("{f28}", "{ft8}", RISCV::F28_F) 8584 .Cases("{f29}", "{ft9}", RISCV::F29_F) 8585 .Cases("{f30}", "{ft10}", RISCV::F30_F) 8586 .Cases("{f31}", "{ft11}", RISCV::F31_F) 8587 .Default(RISCV::NoRegister); 8588 if (FReg != RISCV::NoRegister) { 8589 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); 8590 if (Subtarget.hasStdExtD()) { 8591 unsigned RegNo = FReg - RISCV::F0_F; 8592 unsigned DReg = RISCV::F0_D + RegNo; 8593 return std::make_pair(DReg, &RISCV::FPR64RegClass); 8594 } 8595 return std::make_pair(FReg, &RISCV::FPR32RegClass); 8596 } 8597 } 8598 8599 if (Subtarget.hasStdExtV()) { 8600 Register VReg = StringSwitch<Register>(Constraint.lower()) 8601 .Case("{v0}", RISCV::V0) 8602 .Case("{v1}", RISCV::V1) 8603 .Case("{v2}", RISCV::V2) 8604 .Case("{v3}", RISCV::V3) 8605 .Case("{v4}", RISCV::V4) 8606 .Case("{v5}", RISCV::V5) 8607 .Case("{v6}", RISCV::V6) 8608 .Case("{v7}", RISCV::V7) 8609 .Case("{v8}", RISCV::V8) 8610 .Case("{v9}", RISCV::V9) 8611 .Case("{v10}", RISCV::V10) 8612 .Case("{v11}", RISCV::V11) 8613 .Case("{v12}", RISCV::V12) 8614 .Case("{v13}", RISCV::V13) 8615 .Case("{v14}", RISCV::V14) 8616 .Case("{v15}", RISCV::V15) 8617 .Case("{v16}", RISCV::V16) 8618 .Case("{v17}", RISCV::V17) 8619 .Case("{v18}", RISCV::V18) 8620 .Case("{v19}", RISCV::V19) 8621 .Case("{v20}", RISCV::V20) 8622 .Case("{v21}", RISCV::V21) 8623 .Case("{v22}", RISCV::V22) 8624 .Case("{v23}", RISCV::V23) 8625 .Case("{v24}", RISCV::V24) 8626 .Case("{v25}", RISCV::V25) 8627 .Case("{v26}", RISCV::V26) 8628 .Case("{v27}", RISCV::V27) 8629 .Case("{v28}", RISCV::V28) 8630 .Case("{v29}", RISCV::V29) 8631 .Case("{v30}", RISCV::V30) 8632 .Case("{v31}", RISCV::V31) 8633 .Default(RISCV::NoRegister); 8634 if (VReg != RISCV::NoRegister) { 8635 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy)) 8636 return std::make_pair(VReg, &RISCV::VMRegClass); 8637 if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy)) 8638 return std::make_pair(VReg, &RISCV::VRRegClass); 8639 for (const auto *RC : 8640 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 8641 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) { 8642 VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC); 8643 return 
        }
      }
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'S':
      if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0)));
      } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                                BA->getValueType(0)));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as
  // floating point operations can't be used in an lr/sc sequence without
  // breaking the forward-progress guarantee.
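  //
  // For example, an 'atomicrmw fadd' on f32 is expanded by AtomicExpandPass
  // into a load + fadd + cmpxchg loop, whereas (with the A extension)
  // word-sized integer operations lower directly to AMO instructions and the
  // 8/16-bit cases below use the masked LR/SC intrinsics.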
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
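  //
  // For example, on RV64 an i8 atomicrmw min whose byte lies 8 bits into its
  // aligned word (ShiftAmt = 8, ValWidth = 8) gets SextShamt = 64 - 8 - 8 =
  // 48: shifting left and then arithmetically right by 48 inside the LR/SC
  // loop sign-extends the field before the signed comparison.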
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress the unnecessary extensions if the LibCall
  // arguments or return value is f32 type for LP64 ABI.
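  // For instance, a float addition lowered to the __addsf3 libcall on a
  // soft-float LP64 target passes its f32 arguments as 32-bit values in GPRs;
  // promoting them to i64 first would only insert redundant extension
  // instructions around the call.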
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into a SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
    // and cast to f32.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types are different, bitcast to the same element type
      // as PartVT first.
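      //
      // As a purely illustrative pairing (not necessarily one produced by the
      // ABI), splitting an nxv1i64 value into an nxv4i32 part would first
      // bitcast the value to nxv2i32 and then insert it at index 0 of an
      // undef nxv4i32.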
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert to the same element type
      // as PartVT first.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm