//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
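  // Note: XLenVT is MVT::i32 on RV32 and MVT::i64 on RV64, so the GPR class
  // below covers the native integer type for either base ISA; the scalar FP
  // classes are only added when the corresponding extension is present.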
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasStdExtV()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs)
      addRegClassForRVV(VT);

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
        const TargetRegisterClass *RC;
        if (LMul == 1)
          RC = &RISCV::VRRegClass;
        else if (LMul == 2)
          RC = &RISCV::VRM2RegClass;
        else if (LMul == 4)
          RC = &RISCV::VRM4RegClass;
        else if (LMul == 8)
          RC = &RISCV::VRM8RegClass;
        else
          llvm_unreachable("Unexpected LMul!");

        addRegisterClass(VT, RC);
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
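  // A quick reminder of the legalization actions requested below: Legal means
  // the node is natively selectable, Custom routes it through LowerOperation /
  // ReplaceNodeResults, Expand has generic legalization rewrite it in terms of
  // other nodes (or a libcall), and Promote widens the type first.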
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i8, Custom);
    setOperationAction(ISD::UDIV, MVT::i8, Custom);
    setOperationAction(ISD::UREM, MVT::i8, Custom);
    setOperationAction(ISD::SDIV, MVT::i16, Custom);
    setOperationAction(ISD::UDIV, MVT::i16, Custom);
    setOperationAction(ISD::UREM, MVT::i16, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       Subtarget.hasStdExtZbb() ? Legal : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

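  // Address nodes (global addresses, block addresses, constant pools, jump
  // tables and TLS addresses) are lowered to target-specific sequences; see
  // getAddr() further down for the materialization patterns used.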
  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    } else {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction(ISD::SPLAT_VECTOR, MVT::i64, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);

      // Mask VTs are custom-expanded into a series of standard nodes
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    }

    for (MVT VT : IntVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        SetCommonVFPActions(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fixedlen_vector_valuetypes())
          setTruncStoreAction(VT, OtherVT, Expand);

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        // Operations below differ between mask and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);
      }
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  // We can use any register for comparisons
  setHasMultipleConditionRegisters();

  setTargetDAGCombine(ISD::SETCC);
  if (Subtarget.hasStdExtZbp()) {
    setTargetDAGCombine(ISD::OR);
  }
  if (Subtarget.hasStdExtV())
    setTargetDAGCombine(ISD::FCOPYSIGN);
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
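// For example, (setcc a, b, setgt) becomes (setcc b, a, setlt): RISC-V only
// provides BEQ/BNE/BLT/BGE/BLTU/BGEU, so the "greater" forms are rewritten in
// terms of their swapped-operand "less" counterparts (see
// getBranchOpcodeForIntCondCode below).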
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVVLMUL::LMUL_F8;
  case 16:
    return RISCVVLMUL::LMUL_F4;
  case 32:
    return RISCVVLMUL::LMUL_F2;
  case 64:
    return RISCVVLMUL::LMUL_1;
  case 128:
    return RISCVVLMUL::LMUL_2;
  case 256:
    return RISCVVLMUL::LMUL_4;
  case 512:
    return RISCVVLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVVLMUL::LMUL_F8:
  case RISCVVLMUL::LMUL_F4:
  case RISCVVLMUL::LMUL_F2:
  case RISCVVLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVVLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVVLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVVLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVVLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
      LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVVLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVVLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
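// For example (matching the worked example in the body below), extracting
// nxv2i32 from nxv16i32 at element index 12 composes sub_vrm4_1, sub_vrm2_1
// and sub_vrm1_0 and leaves a remaining element index of 0, so no further
// VR-level insert/extract is required in that case.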
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

// Return the largest legal scalable vector type that matches VT's element type.
MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) &&
         "Expected legal fixed length vector!");

  unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
  assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1: {
    // Masks are calculated assuming 8-bit elements since that's when we need
    // the most elements.
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
    return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
  }
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
    return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
  }
  }
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(
    SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return getContainerForFixedLengthVector(*this, VT, getSubtarget());
}
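
// To illustrate the container mapping above: assuming RVVBitsPerBlock is 64
// (as the VSCALE lowering below implies), a fixed-length v8i16 for which the
// subtarget computes an LMUL of 2 is assigned the container nxv8i16, and
// fixed-length i1 masks are sized as if their elements were 8 bits wide.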

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
  return {Mask, VL};
}

// As above but assuming the given type is a scalable vector type.
static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {
  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
}
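
// Note on the VL operand built above: for scalable types the requested vector
// length is the register X0, which the V extension defines as asking for
// VLMAX; for fixed-length types it is the exact element count of the original
// VT.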

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either is (currently) supported. This can get us into an infinite loop
// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
// as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  return false;
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT =
      RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    return SDValue();
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match an index sequence, which we can lower directly to the vid
  // instruction. An all-undef vector is matched by getSplatValue, above.
  if (VT.isInteger()) {
    bool IsVID = true;
    for (unsigned i = 0, e = Op.getNumOperands(); i < e && IsVID; i++)
      IsVID &= Op.getOperand(i).isUndef() ||
               (isa<ConstantSDNode>(Op.getOperand(i)) &&
                Op.getConstantOperandVal(i) == i);

    if (IsVID) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      return convertFromScalableVector(VT, VID, DAG, Subtarget);
    }
  }

  return SDValue();
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  if (SVN->isSplat()) {
    int Lane = SVN->getSplatIndex();
    if (Lane >= 0) {
      MVT ContainerVT = RISCVTargetLowering::getContainerForFixedLengthVector(
          DAG, VT, Subtarget);

      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)VT.getVectorNumElements() && "Unexpected lane!");

      SDValue Mask, VL;
      std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
      MVT XLenVT = Subtarget.getXLenVT();
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), Mask, VL);
      return convertFromScalableVector(VT, Gather, DAG, Subtarget);
    }
  }

  return SDValue();
}
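
// Emit an RVV FP extend or round of Op. For scalable result types this uses
// the generic ISD nodes; for fixed-length result types, Op is expected to
// already be a value of the scalable container type and the result is
// produced in ContainerVT using the VL-predicated FP_EXTEND_VL/FP_ROUND_VL
// nodes.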
static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
                                     SDLoc DL, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {
  if (VT.isScalableVector())
    return DAG.getFPExtendOrRound(Op, DL, VT);
  assert(VT.isFixedLengthVector() &&
         "Unexpected value type for RVV FP extend/round lowering");
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
  unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
                        ? RISCVISD::FP_EXTEND_VL
                        : RISCVISD::FP_ROUND_VL;
  return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    SDValue Op0 = Op.getOperand(0);
    // We can handle fixed length vector bitcasts with a simple replacement
    // in isel.
    if (Op.getValueType().isFixedLengthVector()) {
      if (Op0.getValueType().isFixedLengthVector())
        return Op;
      return SDValue();
    }
    assert(((Subtarget.is64Bit() && Subtarget.hasStdExtF()) ||
            Subtarget.hasStdExtZfh()) &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    if (Op.getValueType() == MVT::f16 && Subtarget.hasStdExtZfh()) {
      if (Op0.getValueType() != MVT::i16)
        return SDValue();
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    } else if (Op.getValueType() == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      if (Op0.getValueType() != MVT::i32)
        return SDValue();
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
    return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                       DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    MVT VT = Op.getSimpleValueType();
    assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
    SDLoc DL(Op);
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
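    // (ISD::FSHL/FSHR define the shift amount modulo the bit width, so the
    // AND with XLen-1 below preserves their semantics while producing an
    // amount that FSL/FSR accept.)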
    unsigned ShAmtWidth = Subtarget.getXLen() - 1;
    SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
                                DAG.getConstant(ShAmtWidth, DL, VT));
    unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
    return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
  }
  case ISD::TRUNCATE: {
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    // Only custom-lower vector truncates
    if (!VT.isVector())
      return Op;

    // Truncates to mask types are handled differently
    if (VT.getVectorElementType() == MVT::i1)
      return lowerVectorMaskTrunc(Op, DAG);

    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
    // truncate by one power of two at a time.
    MVT DstEltVT = VT.getVectorElementType();

    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();

    assert(DstEltVT.bitsLT(SrcEltVT) &&
           isPowerOf2_64(DstEltVT.getSizeInBits()) &&
           isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
           "Unexpected vector truncate lowering");

    MVT ContainerVT = SrcVT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(SrcVT);
      Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
    }

    SDValue Result = Src;
    SDValue Mask, VL;
    std::tie(Mask, VL) =
        getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
    LLVMContext &Context = *DAG.getContext();
    const ElementCount Count = ContainerVT.getVectorElementCount();
    do {
      SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
      EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
      Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
                           Mask, VL);
    } while (SrcEltVT != DstEltVT);

    if (SrcVT.isFixedLengthVector())
      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);

    return Result;
  }
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
  case ISD::SIGN_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
  case ISD::SPLAT_VECTOR:
    return lowerSPLATVECTOR(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VSCALE: {
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64 bit known
    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
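    // For example, on a machine with VLEN=128, VLENB reads as 16, so vscale is
    // 2 and <vscale x 2 x i32> holds four i32 elements.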
    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
  }
  case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
    // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
    // via f32.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(VT);
      MVT SrcContainerVT =
          ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
        SrcVT.getVectorElementType() != MVT::f16) {
      // For scalable vectors, we only need to close the gap between
      // vXf16->vXf64.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    MVT InterVT = VT.changeVectorElementType(MVT::f32);
    MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
    SDValue IntermediateExtend = getRVVFPExtendOrRound(
        Src, InterVT, InterContainerVT, DL, DAG, Subtarget);

    SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
                                           DL, DAG, Subtarget);
    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Extend, DAG, Subtarget);
    return Extend;
  }
  case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
    // custom-lower f64->f16 rounds via RVV's round-to-odd float
    // conversion instruction.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (VT.isFixedLengthVector()) {
      MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
      ContainerVT =
          SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
        SrcVT.getVectorElementType() != MVT::f64) {
      // For scalable vectors, we only need to close the gap between
      // vXf64<->vXf16.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    SDValue Mask, VL;
    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

    MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
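    // Using the round-to-odd conversion for this first narrowing step is what
    // lets the two-step f64->f32->f16 sequence avoid double-rounding errors.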
    SDValue IntermediateRound =
        DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
    SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
                                          DL, DAG, Subtarget);

    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Round, DAG, Subtarget);
    return Round;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that do two hops into
    // sequences.
    MVT VT = Op.getSimpleValueType();
    if (!VT.isVector())
      return Op;
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    MVT EltVT = VT.getVectorElementType();
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();
    unsigned EltSize = EltVT.getSizeInBits();
    unsigned SrcEltSize = SrcEltVT.getSizeInBits();
    assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
           "Unexpected vector element types");

    bool IsInt2FP = SrcEltVT.isInteger();
    // Widening conversions
    if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
      if (IsInt2FP) {
        // Do a regular integer sign/zero extension then convert to float.
        MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
                                      VT.getVectorElementCount());
        unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
                                 ? ISD::ZERO_EXTEND
                                 : ISD::SIGN_EXTEND;
        SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
        return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
      }
      // FP2Int
      assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
      // Do one doubling fp_extend then complete the operation by converting
      // to int.
      MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
      SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
      return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
    }

    // Narrowing conversions
    if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
      if (IsInt2FP) {
        // One narrowing int_to_fp, then an fp_round.
        assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
        MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
        SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
        return DAG.getFPExtendOrRound(Int2FP, DL, VT);
      }
      // FP2Int
      // One narrowing fp_to_int, then truncate the integer. If the float isn't
      // representable by the integer, the result is poison.
      MVT IVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
                           VT.getVectorElementCount());
      SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
    }

    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions as well as halving/doubling ones.
    if (!VT.isFixedLengthVector())
      return Op;

    // For fixed-length vectors we lower to a custom "VL" node.
    unsigned RVVOpc = 0;
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Impossible opcode");
    case ISD::FP_TO_SINT:
      RVVOpc = RISCVISD::FP_TO_SINT_VL;
      break;
    case ISD::FP_TO_UINT:
      RVVOpc = RISCVISD::FP_TO_UINT_VL;
      break;
    case ISD::SINT_TO_FP:
      RVVOpc = RISCVISD::SINT_TO_FP_VL;
      break;
    case ISD::UINT_TO_FP:
      RVVOpc = RISCVISD::UINT_TO_FP_VL;
      break;
    }

    MVT ContainerVT, SrcContainerVT;
    // Derive the reference container type from the larger vector type.
1481 if (SrcEltSize > EltSize) { 1482 SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 1483 ContainerVT = 1484 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 1485 } else { 1486 ContainerVT = getContainerForFixedLengthVector(VT); 1487 SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT); 1488 } 1489 1490 SDValue Mask, VL; 1491 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 1492 1493 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 1494 Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL); 1495 return convertFromScalableVector(VT, Src, DAG, Subtarget); 1496 } 1497 case ISD::VECREDUCE_ADD: 1498 case ISD::VECREDUCE_UMAX: 1499 case ISD::VECREDUCE_SMAX: 1500 case ISD::VECREDUCE_UMIN: 1501 case ISD::VECREDUCE_SMIN: 1502 case ISD::VECREDUCE_AND: 1503 case ISD::VECREDUCE_OR: 1504 case ISD::VECREDUCE_XOR: 1505 return lowerVECREDUCE(Op, DAG); 1506 case ISD::VECREDUCE_FADD: 1507 case ISD::VECREDUCE_SEQ_FADD: 1508 return lowerFPVECREDUCE(Op, DAG); 1509 case ISD::INSERT_SUBVECTOR: 1510 return lowerINSERT_SUBVECTOR(Op, DAG); 1511 case ISD::EXTRACT_SUBVECTOR: 1512 return lowerEXTRACT_SUBVECTOR(Op, DAG); 1513 case ISD::BUILD_VECTOR: 1514 return lowerBUILD_VECTOR(Op, DAG, Subtarget); 1515 case ISD::VECTOR_SHUFFLE: 1516 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget); 1517 case ISD::CONCAT_VECTORS: { 1518 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is 1519 // better than going through the stack, as the default expansion does. 1520 SDLoc DL(Op); 1521 MVT VT = Op.getSimpleValueType(); 1522 assert(VT.isFixedLengthVector() && "Unexpected CONCAT_VECTORS lowering"); 1523 unsigned NumOpElts = 1524 Op.getOperand(0).getSimpleValueType().getVectorNumElements(); 1525 SDValue Vec = DAG.getUNDEF(VT); 1526 for (const auto &OpIdx : enumerate(Op->ops())) 1527 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(), 1528 DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL)); 1529 return Vec; 1530 } 1531 case ISD::LOAD: 1532 return lowerFixedLengthVectorLoadToRVV(Op, DAG); 1533 case ISD::STORE: 1534 return lowerFixedLengthVectorStoreToRVV(Op, DAG); 1535 case ISD::SETCC: 1536 return lowerFixedLengthVectorSetccToRVV(Op, DAG); 1537 case ISD::ADD: 1538 return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL); 1539 case ISD::SUB: 1540 return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL); 1541 case ISD::MUL: 1542 return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL); 1543 case ISD::MULHS: 1544 return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL); 1545 case ISD::MULHU: 1546 return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL); 1547 case ISD::AND: 1548 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL, 1549 RISCVISD::AND_VL); 1550 case ISD::OR: 1551 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL, 1552 RISCVISD::OR_VL); 1553 case ISD::XOR: 1554 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL, 1555 RISCVISD::XOR_VL); 1556 case ISD::SDIV: 1557 return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL); 1558 case ISD::SREM: 1559 return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL); 1560 case ISD::UDIV: 1561 return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL); 1562 case ISD::UREM: 1563 return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL); 1564 case ISD::SHL: 1565 return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL); 1566 case ISD::SRA: 1567 return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL); 1568 case ISD::SRL: 1569 return lowerToScalableOp(Op, DAG, 
RISCVISD::SRL_VL); 1570 case ISD::FADD: 1571 return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL); 1572 case ISD::FSUB: 1573 return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL); 1574 case ISD::FMUL: 1575 return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL); 1576 case ISD::FDIV: 1577 return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL); 1578 case ISD::FNEG: 1579 return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL); 1580 case ISD::FABS: 1581 return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL); 1582 case ISD::FSQRT: 1583 return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL); 1584 case ISD::FMA: 1585 return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL); 1586 case ISD::SMIN: 1587 return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL); 1588 case ISD::SMAX: 1589 return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL); 1590 case ISD::UMIN: 1591 return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL); 1592 case ISD::UMAX: 1593 return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL); 1594 case ISD::VSELECT: 1595 return lowerFixedLengthVectorSelectToRVV(Op, DAG); 1596 } 1597 } 1598 1599 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, 1600 SelectionDAG &DAG, unsigned Flags) { 1601 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags); 1602 } 1603 1604 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty, 1605 SelectionDAG &DAG, unsigned Flags) { 1606 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(), 1607 Flags); 1608 } 1609 1610 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty, 1611 SelectionDAG &DAG, unsigned Flags) { 1612 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(), 1613 N->getOffset(), Flags); 1614 } 1615 1616 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty, 1617 SelectionDAG &DAG, unsigned Flags) { 1618 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags); 1619 } 1620 1621 template <class NodeTy> 1622 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG, 1623 bool IsLocal) const { 1624 SDLoc DL(N); 1625 EVT Ty = getPointerTy(DAG.getDataLayout()); 1626 1627 if (isPositionIndependent()) { 1628 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 1629 if (IsLocal) 1630 // Use PC-relative addressing to access the symbol. This generates the 1631 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym)) 1632 // %pcrel_lo(auipc)). 1633 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 1634 1635 // Use PC-relative addressing to access the GOT for this symbol, then load 1636 // the address from the GOT. This generates the pattern (PseudoLA sym), 1637 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))). 1638 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0); 1639 } 1640 1641 switch (getTargetMachine().getCodeModel()) { 1642 default: 1643 report_fatal_error("Unsupported code model for lowering"); 1644 case CodeModel::Small: { 1645 // Generate a sequence for accessing addresses within the first 2 GiB of 1646 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)). 
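    // For illustration, this typically materialises as:
    //   lui  rd, %hi(sym)
    //   addi rd, rd, %lo(sym)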
1647 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI); 1648 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO); 1649 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 1650 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0); 1651 } 1652 case CodeModel::Medium: { 1653 // Generate a sequence for accessing addresses within any 2GiB range within 1654 // the address space. This generates the pattern (PseudoLLA sym), which 1655 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)). 1656 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0); 1657 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0); 1658 } 1659 } 1660 } 1661 1662 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op, 1663 SelectionDAG &DAG) const { 1664 SDLoc DL(Op); 1665 EVT Ty = Op.getValueType(); 1666 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 1667 int64_t Offset = N->getOffset(); 1668 MVT XLenVT = Subtarget.getXLenVT(); 1669 1670 const GlobalValue *GV = N->getGlobal(); 1671 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV); 1672 SDValue Addr = getAddr(N, DAG, IsLocal); 1673 1674 // In order to maximise the opportunity for common subexpression elimination, 1675 // emit a separate ADD node for the global address offset instead of folding 1676 // it in the global address node. Later peephole optimisations may choose to 1677 // fold it back in when profitable. 1678 if (Offset != 0) 1679 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 1680 DAG.getConstant(Offset, DL, XLenVT)); 1681 return Addr; 1682 } 1683 1684 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op, 1685 SelectionDAG &DAG) const { 1686 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op); 1687 1688 return getAddr(N, DAG); 1689 } 1690 1691 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op, 1692 SelectionDAG &DAG) const { 1693 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); 1694 1695 return getAddr(N, DAG); 1696 } 1697 1698 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op, 1699 SelectionDAG &DAG) const { 1700 JumpTableSDNode *N = cast<JumpTableSDNode>(Op); 1701 1702 return getAddr(N, DAG); 1703 } 1704 1705 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, 1706 SelectionDAG &DAG, 1707 bool UseGOT) const { 1708 SDLoc DL(N); 1709 EVT Ty = getPointerTy(DAG.getDataLayout()); 1710 const GlobalValue *GV = N->getGlobal(); 1711 MVT XLenVT = Subtarget.getXLenVT(); 1712 1713 if (UseGOT) { 1714 // Use PC-relative addressing to access the GOT for this TLS symbol, then 1715 // load the address from the GOT and add the thread pointer. This generates 1716 // the pattern (PseudoLA_TLS_IE sym), which expands to 1717 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)). 1718 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 1719 SDValue Load = 1720 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0); 1721 1722 // Add the thread pointer. 1723 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 1724 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg); 1725 } 1726 1727 // Generate a sequence for accessing the address relative to the thread 1728 // pointer, with the appropriate adjustment for the thread pointer offset. 
1729 // This generates the pattern 1730 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym)) 1731 SDValue AddrHi = 1732 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI); 1733 SDValue AddrAdd = 1734 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD); 1735 SDValue AddrLo = 1736 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO); 1737 1738 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0); 1739 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT); 1740 SDValue MNAdd = SDValue( 1741 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd), 1742 0); 1743 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0); 1744 } 1745 1746 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, 1747 SelectionDAG &DAG) const { 1748 SDLoc DL(N); 1749 EVT Ty = getPointerTy(DAG.getDataLayout()); 1750 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits()); 1751 const GlobalValue *GV = N->getGlobal(); 1752 1753 // Use a PC-relative addressing mode to access the global dynamic GOT address. 1754 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to 1755 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)). 1756 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0); 1757 SDValue Load = 1758 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0); 1759 1760 // Prepare argument list to generate call. 1761 ArgListTy Args; 1762 ArgListEntry Entry; 1763 Entry.Node = Load; 1764 Entry.Ty = CallTy; 1765 Args.push_back(Entry); 1766 1767 // Setup call to __tls_get_addr. 1768 TargetLowering::CallLoweringInfo CLI(DAG); 1769 CLI.setDebugLoc(DL) 1770 .setChain(DAG.getEntryNode()) 1771 .setLibCallee(CallingConv::C, CallTy, 1772 DAG.getExternalSymbol("__tls_get_addr", Ty), 1773 std::move(Args)); 1774 1775 return LowerCallTo(CLI).first; 1776 } 1777 1778 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op, 1779 SelectionDAG &DAG) const { 1780 SDLoc DL(Op); 1781 EVT Ty = Op.getValueType(); 1782 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op); 1783 int64_t Offset = N->getOffset(); 1784 MVT XLenVT = Subtarget.getXLenVT(); 1785 1786 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal()); 1787 1788 if (DAG.getMachineFunction().getFunction().getCallingConv() == 1789 CallingConv::GHC) 1790 report_fatal_error("In GHC calling convention TLS is not supported"); 1791 1792 SDValue Addr; 1793 switch (Model) { 1794 case TLSModel::LocalExec: 1795 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false); 1796 break; 1797 case TLSModel::InitialExec: 1798 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true); 1799 break; 1800 case TLSModel::LocalDynamic: 1801 case TLSModel::GeneralDynamic: 1802 Addr = getDynamicTLSAddr(N, DAG); 1803 break; 1804 } 1805 1806 // In order to maximise the opportunity for common subexpression elimination, 1807 // emit a separate ADD node for the global address offset instead of folding 1808 // it in the global address node. Later peephole optimisations may choose to 1809 // fold it back in when profitable. 
1810 if (Offset != 0) 1811 return DAG.getNode(ISD::ADD, DL, Ty, Addr, 1812 DAG.getConstant(Offset, DL, XLenVT)); 1813 return Addr; 1814 } 1815 1816 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const { 1817 SDValue CondV = Op.getOperand(0); 1818 SDValue TrueV = Op.getOperand(1); 1819 SDValue FalseV = Op.getOperand(2); 1820 SDLoc DL(Op); 1821 MVT XLenVT = Subtarget.getXLenVT(); 1822 1823 // If the result type is XLenVT and CondV is the output of a SETCC node 1824 // which also operated on XLenVT inputs, then merge the SETCC node into the 1825 // lowered RISCVISD::SELECT_CC to take advantage of the integer 1826 // compare+branch instructions. i.e.: 1827 // (select (setcc lhs, rhs, cc), truev, falsev) 1828 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev) 1829 if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC && 1830 CondV.getOperand(0).getSimpleValueType() == XLenVT) { 1831 SDValue LHS = CondV.getOperand(0); 1832 SDValue RHS = CondV.getOperand(1); 1833 auto CC = cast<CondCodeSDNode>(CondV.getOperand(2)); 1834 ISD::CondCode CCVal = CC->get(); 1835 1836 normaliseSetCC(LHS, RHS, CCVal); 1837 1838 SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT); 1839 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV}; 1840 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 1841 } 1842 1843 // Otherwise: 1844 // (select condv, truev, falsev) 1845 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev) 1846 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 1847 SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT); 1848 1849 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV}; 1850 1851 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops); 1852 } 1853 1854 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { 1855 MachineFunction &MF = DAG.getMachineFunction(); 1856 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>(); 1857 1858 SDLoc DL(Op); 1859 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 1860 getPointerTy(MF.getDataLayout())); 1861 1862 // vastart just stores the address of the VarArgsFrameIndex slot into the 1863 // memory location argument. 
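  // In other words, the whole of VASTART lowers to a single pointer-sized
  // store of the frame index to the given va_list pointer operand.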
1864 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 1865 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1), 1866 MachinePointerInfo(SV)); 1867 } 1868 1869 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op, 1870 SelectionDAG &DAG) const { 1871 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 1872 MachineFunction &MF = DAG.getMachineFunction(); 1873 MachineFrameInfo &MFI = MF.getFrameInfo(); 1874 MFI.setFrameAddressIsTaken(true); 1875 Register FrameReg = RI.getFrameRegister(MF); 1876 int XLenInBytes = Subtarget.getXLen() / 8; 1877 1878 EVT VT = Op.getValueType(); 1879 SDLoc DL(Op); 1880 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT); 1881 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1882 while (Depth--) { 1883 int Offset = -(XLenInBytes * 2); 1884 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr, 1885 DAG.getIntPtrConstant(Offset, DL)); 1886 FrameAddr = 1887 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 1888 } 1889 return FrameAddr; 1890 } 1891 1892 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op, 1893 SelectionDAG &DAG) const { 1894 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo(); 1895 MachineFunction &MF = DAG.getMachineFunction(); 1896 MachineFrameInfo &MFI = MF.getFrameInfo(); 1897 MFI.setReturnAddressIsTaken(true); 1898 MVT XLenVT = Subtarget.getXLenVT(); 1899 int XLenInBytes = Subtarget.getXLen() / 8; 1900 1901 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 1902 return SDValue(); 1903 1904 EVT VT = Op.getValueType(); 1905 SDLoc DL(Op); 1906 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 1907 if (Depth) { 1908 int Off = -XLenInBytes; 1909 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG); 1910 SDValue Offset = DAG.getConstant(Off, DL, VT); 1911 return DAG.getLoad(VT, DL, DAG.getEntryNode(), 1912 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset), 1913 MachinePointerInfo()); 1914 } 1915 1916 // Return the value of the return address register, marking it an implicit 1917 // live-in. 
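  // (On RISC-V the return address register is x1/ra.)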
1918 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT)); 1919 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT); 1920 } 1921 1922 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op, 1923 SelectionDAG &DAG) const { 1924 SDLoc DL(Op); 1925 SDValue Lo = Op.getOperand(0); 1926 SDValue Hi = Op.getOperand(1); 1927 SDValue Shamt = Op.getOperand(2); 1928 EVT VT = Lo.getValueType(); 1929 1930 // if Shamt-XLEN < 0: // Shamt < XLEN 1931 // Lo = Lo << Shamt 1932 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt)) 1933 // else: 1934 // Lo = 0 1935 // Hi = Lo << (Shamt-XLEN) 1936 1937 SDValue Zero = DAG.getConstant(0, DL, VT); 1938 SDValue One = DAG.getConstant(1, DL, VT); 1939 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 1940 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 1941 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 1942 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 1943 1944 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt); 1945 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One); 1946 SDValue ShiftRightLo = 1947 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt); 1948 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt); 1949 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo); 1950 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen); 1951 1952 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 1953 1954 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero); 1955 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 1956 1957 SDValue Parts[2] = {Lo, Hi}; 1958 return DAG.getMergeValues(Parts, DL); 1959 } 1960 1961 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, 1962 bool IsSRA) const { 1963 SDLoc DL(Op); 1964 SDValue Lo = Op.getOperand(0); 1965 SDValue Hi = Op.getOperand(1); 1966 SDValue Shamt = Op.getOperand(2); 1967 EVT VT = Lo.getValueType(); 1968 1969 // SRA expansion: 1970 // if Shamt-XLEN < 0: // Shamt < XLEN 1971 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 1972 // Hi = Hi >>s Shamt 1973 // else: 1974 // Lo = Hi >>s (Shamt-XLEN); 1975 // Hi = Hi >>s (XLEN-1) 1976 // 1977 // SRL expansion: 1978 // if Shamt-XLEN < 0: // Shamt < XLEN 1979 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt)) 1980 // Hi = Hi >>u Shamt 1981 // else: 1982 // Lo = Hi >>u (Shamt-XLEN); 1983 // Hi = 0; 1984 1985 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL; 1986 1987 SDValue Zero = DAG.getConstant(0, DL, VT); 1988 SDValue One = DAG.getConstant(1, DL, VT); 1989 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT); 1990 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT); 1991 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen); 1992 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt); 1993 1994 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt); 1995 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One); 1996 SDValue ShiftLeftHi = 1997 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt); 1998 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi); 1999 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt); 2000 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen); 2001 SDValue HiFalse = 2002 IsSRA ? 
DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero; 2003 2004 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT); 2005 2006 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse); 2007 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse); 2008 2009 SDValue Parts[2] = {Lo, Hi}; 2010 return DAG.getMergeValues(Parts, DL); 2011 } 2012 2013 // Custom-lower a SPLAT_VECTOR where XLEN<SEW, as the SEW element type is 2014 // illegal (currently only vXi64 RV32). 2015 // FIXME: We could also catch non-constant sign-extended i32 values and lower 2016 // them to SPLAT_VECTOR_I64 2017 SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op, 2018 SelectionDAG &DAG) const { 2019 SDLoc DL(Op); 2020 EVT VecVT = Op.getValueType(); 2021 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 && 2022 "Unexpected SPLAT_VECTOR lowering"); 2023 SDValue SplatVal = Op.getOperand(0); 2024 2025 // If we can prove that the value is a sign-extended 32-bit value, lower this 2026 // as a custom node in order to try and match RVV vector/scalar instructions. 2027 if (auto *CVal = dyn_cast<ConstantSDNode>(SplatVal)) { 2028 if (isInt<32>(CVal->getSExtValue())) 2029 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, 2030 DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32)); 2031 } 2032 2033 if (SplatVal.getOpcode() == ISD::SIGN_EXTEND && 2034 SplatVal.getOperand(0).getValueType() == MVT::i32) { 2035 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, 2036 SplatVal.getOperand(0)); 2037 } 2038 2039 // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not 2040 // to accidentally sign-extend the 32-bit halves to the e64 SEW: 2041 // vmv.v.x vX, hi 2042 // vsll.vx vX, vX, /*32*/ 2043 // vmv.v.x vY, lo 2044 // vsll.vx vY, vY, /*32*/ 2045 // vsrl.vx vY, vY, /*32*/ 2046 // vor.vv vX, vX, vY 2047 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2048 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2049 SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT); 2050 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, Zero); 2051 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, SplatVal, One); 2052 2053 Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo); 2054 Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV); 2055 Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV); 2056 2057 if (isNullConstant(Hi)) 2058 return Lo; 2059 2060 Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi); 2061 Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV); 2062 2063 return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi); 2064 } 2065 2066 // Custom-lower extensions from mask vectors by using a vselect either with 1 2067 // for zero/any-extension or -1 for sign-extension: 2068 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0) 2069 // Note that any-extension is lowered identically to zero-extension. 
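// For example: (v4i32 = sext v4i1 %m) -> (v4i32 = vselect %m, splat(-1), splat(0))
//              (v4i32 = zext v4i1 %m) -> (v4i32 = vselect %m, splat(1),  splat(0))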
2070 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG, 2071 int64_t ExtTrueVal) const { 2072 SDLoc DL(Op); 2073 MVT VecVT = Op.getSimpleValueType(); 2074 SDValue Src = Op.getOperand(0); 2075 // Only custom-lower extensions from mask types 2076 assert(Src.getValueType().isVector() && 2077 Src.getValueType().getVectorElementType() == MVT::i1); 2078 2079 MVT XLenVT = Subtarget.getXLenVT(); 2080 SDValue SplatZero = DAG.getConstant(0, DL, XLenVT); 2081 SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT); 2082 2083 if (VecVT.isScalableVector()) { 2084 // Be careful not to introduce illegal scalar types at this stage, and be 2085 // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is 2086 // illegal and must be expanded. Since we know that the constants are 2087 // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly. 2088 bool IsRV32E64 = 2089 !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64; 2090 2091 if (!IsRV32E64) { 2092 SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero); 2093 SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal); 2094 } else { 2095 SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero); 2096 SplatTrueVal = 2097 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal); 2098 } 2099 2100 return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero); 2101 } 2102 2103 MVT ContainerVT = getContainerForFixedLengthVector(VecVT); 2104 MVT I1ContainerVT = 2105 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2106 2107 SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget); 2108 2109 SDValue Mask, VL; 2110 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2111 2112 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL); 2113 SplatTrueVal = 2114 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL); 2115 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, 2116 SplatTrueVal, SplatZero, VL); 2117 2118 return convertFromScalableVector(VecVT, Select, DAG, Subtarget); 2119 } 2120 2121 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV( 2122 SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const { 2123 MVT ExtVT = Op.getSimpleValueType(); 2124 // Only custom-lower extensions from fixed-length vector types. 2125 if (!ExtVT.isFixedLengthVector()) 2126 return Op; 2127 MVT VT = Op.getOperand(0).getSimpleValueType(); 2128 // Grab the canonical container type for the extended type. Infer the smaller 2129 // type from that to ensure the same number of vector elements, as we know 2130 // the LMUL will be sufficient to hold the smaller type. 2131 MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT); 2132 // Get the extended container type manually to ensure the same number of 2133 // vector elements between source and dest. 
2134 MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 2135 ContainerExtVT.getVectorElementCount()); 2136 2137 SDValue Op1 = 2138 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 2139 2140 SDLoc DL(Op); 2141 SDValue Mask, VL; 2142 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2143 2144 SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL); 2145 2146 return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget); 2147 } 2148 2149 // Custom-lower truncations from vectors to mask vectors by using a mask and a 2150 // setcc operation: 2151 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne) 2152 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op, 2153 SelectionDAG &DAG) const { 2154 SDLoc DL(Op); 2155 EVT MaskVT = Op.getValueType(); 2156 // Only expect to custom-lower truncations to mask types 2157 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 && 2158 "Unexpected type for vector mask lowering"); 2159 SDValue Src = Op.getOperand(0); 2160 MVT VecVT = Src.getSimpleValueType(); 2161 2162 // If this is a fixed vector, we need to convert it to a scalable vector. 2163 MVT ContainerVT = VecVT; 2164 if (VecVT.isFixedLengthVector()) { 2165 ContainerVT = getContainerForFixedLengthVector(VecVT); 2166 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 2167 } 2168 2169 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT()); 2170 SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); 2171 2172 SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne); 2173 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero); 2174 2175 if (VecVT.isScalableVector()) { 2176 SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne); 2177 return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE); 2178 } 2179 2180 SDValue Mask, VL; 2181 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2182 2183 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1); 2184 SDValue Trunc = 2185 DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL); 2186 Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero, 2187 DAG.getCondCode(ISD::SETNE), Mask, VL); 2188 return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget); 2189 } 2190 2191 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 2192 SelectionDAG &DAG) const { 2193 SDLoc DL(Op); 2194 MVT VecVT = Op.getSimpleValueType(); 2195 SDValue Vec = Op.getOperand(0); 2196 SDValue Val = Op.getOperand(1); 2197 SDValue Idx = Op.getOperand(2); 2198 2199 MVT ContainerVT = VecVT; 2200 // If the operand is a fixed-length vector, convert to a scalable one. 2201 if (VecVT.isFixedLengthVector()) { 2202 ContainerVT = getContainerForFixedLengthVector(VecVT); 2203 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2204 } 2205 2206 SDValue Mask, VL; 2207 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 2208 2209 // Custom-legalize INSERT_VECTOR_ELT where XLEN>=SEW, so that the vector is 2210 // first slid down into position, the value is inserted into the first 2211 // position, and the vector is slid back up. We do this to simplify patterns. 
2212 // (slideup vec, (insertelt (slidedown impdef, vec, idx), val, 0), idx), 2213 if (Subtarget.is64Bit() || Val.getValueType() != MVT::i64) { 2214 if (isNullConstant(Idx)) 2215 return DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Vec, Val, VL); 2216 SDValue Slidedown = 2217 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2218 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 2219 SDValue InsertElt0 = 2220 DAG.getNode(RISCVISD::VMV_S_XF_VL, DL, ContainerVT, Slidedown, Val, VL); 2221 return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, InsertElt0, 2222 Idx, Mask, VL); 2223 } 2224 2225 // Custom-legalize INSERT_VECTOR_ELT where XLEN<SEW, as the SEW element type 2226 // is illegal (currently only vXi64 RV32). 2227 // Since there is no easy way of getting a single element into a vector when 2228 // XLEN<SEW, we lower the operation to the following sequence: 2229 // splat vVal, rVal 2230 // vid.v vVid 2231 // vmseq.vx mMask, vVid, rIdx 2232 // vmerge.vvm vDest, vSrc, vVal, mMask 2233 // This essentially merges the original vector with the inserted element by 2234 // using a mask whose only set bit is that corresponding to the insert 2235 // index. 2236 SDValue SplattedVal = DAG.getSplatVector(ContainerVT, DL, Val); 2237 SDValue SplattedIdx = 2238 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, Idx, VL); 2239 2240 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL); 2241 auto SetCCVT = 2242 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ContainerVT); 2243 SDValue SelectCond = 2244 DAG.getNode(RISCVISD::SETCC_VL, DL, SetCCVT, VID, SplattedIdx, 2245 DAG.getCondCode(ISD::SETEQ), Mask, VL); 2246 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, 2247 SelectCond, SplattedVal, Vec, VL); 2248 if (!VecVT.isFixedLengthVector()) 2249 return Select; 2250 return convertFromScalableVector(VecVT, Select, DAG, Subtarget); 2251 } 2252 2253 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then 2254 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer 2255 // types this is done using VMV_X_S to allow us to glean information about the 2256 // sign bits of the result. 2257 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 2258 SelectionDAG &DAG) const { 2259 SDLoc DL(Op); 2260 SDValue Idx = Op.getOperand(1); 2261 SDValue Vec = Op.getOperand(0); 2262 EVT EltVT = Op.getValueType(); 2263 MVT VecVT = Vec.getSimpleValueType(); 2264 MVT XLenVT = Subtarget.getXLenVT(); 2265 2266 // If this is a fixed vector, we need to convert it to a scalable vector. 2267 MVT ContainerVT = VecVT; 2268 if (VecVT.isFixedLengthVector()) { 2269 ContainerVT = getContainerForFixedLengthVector(VecVT); 2270 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2271 } 2272 2273 // If the index is 0, the vector is already in the right position. 2274 if (!isNullConstant(Idx)) { 2275 // Use a VL of 1 to avoid processing more elements than we need. 2276 SDValue VL = DAG.getConstant(1, DL, XLenVT); 2277 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2278 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2279 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2280 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 2281 } 2282 2283 if (!EltVT.isInteger()) { 2284 // Floating-point extracts are handled in TableGen. 
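    // (The patterns there are expected to select a vfmv.f.s for the
    // element-0 extract once the vector has been slid into position.)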
2285 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, 2286 DAG.getConstant(0, DL, XLenVT)); 2287 } 2288 2289 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 2290 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); 2291 } 2292 2293 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 2294 SelectionDAG &DAG) const { 2295 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2296 SDLoc DL(Op); 2297 2298 if (Subtarget.hasStdExtV()) { 2299 // Some RVV intrinsics may claim that they want an integer operand to be 2300 // extended. 2301 if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = 2302 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) { 2303 if (II->ExtendedOperand) { 2304 assert(II->ExtendedOperand < Op.getNumOperands()); 2305 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); 2306 SDValue &ScalarOp = Operands[II->ExtendedOperand]; 2307 EVT OpVT = ScalarOp.getValueType(); 2308 if (OpVT == MVT::i8 || OpVT == MVT::i16 || 2309 (OpVT == MVT::i32 && Subtarget.is64Bit())) { 2310 // If the operand is a constant, sign extend to increase our chances 2311 // of being able to use a .vi instruction. ANY_EXTEND would become a 2312 // a zero extend and the simm5 check in isel would fail. 2313 // FIXME: Should we ignore the upper bits in isel instead? 2314 unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND 2315 : ISD::ANY_EXTEND; 2316 ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp); 2317 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), 2318 Operands); 2319 } 2320 } 2321 } 2322 } 2323 2324 switch (IntNo) { 2325 default: 2326 return SDValue(); // Don't custom lower most intrinsics. 2327 case Intrinsic::thread_pointer: { 2328 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2329 return DAG.getRegister(RISCV::X4, PtrVT); 2330 } 2331 case Intrinsic::riscv_vmv_x_s: 2332 assert(Op.getValueType() == Subtarget.getXLenVT() && "Unexpected VT!"); 2333 return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(), 2334 Op.getOperand(1)); 2335 case Intrinsic::riscv_vmv_v_x: { 2336 SDValue Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), 2337 Op.getOperand(1)); 2338 return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, Op.getValueType(), 2339 Scalar, Op.getOperand(2)); 2340 } 2341 case Intrinsic::riscv_vfmv_v_f: 2342 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(), 2343 Op.getOperand(1), Op.getOperand(2)); 2344 } 2345 } 2346 2347 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, 2348 SelectionDAG &DAG) const { 2349 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 2350 SDLoc DL(Op); 2351 2352 if (Subtarget.hasStdExtV()) { 2353 // Some RVV intrinsics may claim that they want an integer operand to be 2354 // extended. 2355 if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = 2356 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo)) { 2357 if (II->ExtendedOperand) { 2358 // The operands start from the second argument in INTRINSIC_W_CHAIN. 2359 unsigned ExtendOp = II->ExtendedOperand + 1; 2360 assert(ExtendOp < Op.getNumOperands()); 2361 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end()); 2362 SDValue &ScalarOp = Operands[ExtendOp]; 2363 EVT OpVT = ScalarOp.getValueType(); 2364 if (OpVT == MVT::i8 || OpVT == MVT::i16 || 2365 (OpVT == MVT::i32 && Subtarget.is64Bit())) { 2366 // If the operand is a constant, sign extend to increase our chances 2367 // of being able to use a .vi instruction. 
ANY_EXTEND would become a 2368 // a zero extend and the simm5 check in isel would fail. 2369 // FIXME: Should we ignore the upper bits in isel instead? 2370 unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND 2371 : ISD::ANY_EXTEND; 2372 ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp); 2373 return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), 2374 Operands); 2375 } 2376 } 2377 } 2378 } 2379 2380 return SDValue(); // Don't custom lower most intrinsics. 2381 } 2382 2383 static MVT getLMUL1VT(MVT VT) { 2384 assert(VT.getVectorElementType().getSizeInBits() <= 64 && 2385 "Unexpected vector MVT"); 2386 return MVT::getScalableVectorVT( 2387 VT.getVectorElementType(), 2388 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); 2389 } 2390 2391 static unsigned getRVVReductionOp(unsigned ISDOpcode) { 2392 switch (ISDOpcode) { 2393 default: 2394 llvm_unreachable("Unhandled reduction"); 2395 case ISD::VECREDUCE_ADD: 2396 return RISCVISD::VECREDUCE_ADD; 2397 case ISD::VECREDUCE_UMAX: 2398 return RISCVISD::VECREDUCE_UMAX; 2399 case ISD::VECREDUCE_SMAX: 2400 return RISCVISD::VECREDUCE_SMAX; 2401 case ISD::VECREDUCE_UMIN: 2402 return RISCVISD::VECREDUCE_UMIN; 2403 case ISD::VECREDUCE_SMIN: 2404 return RISCVISD::VECREDUCE_SMIN; 2405 case ISD::VECREDUCE_AND: 2406 return RISCVISD::VECREDUCE_AND; 2407 case ISD::VECREDUCE_OR: 2408 return RISCVISD::VECREDUCE_OR; 2409 case ISD::VECREDUCE_XOR: 2410 return RISCVISD::VECREDUCE_XOR; 2411 } 2412 } 2413 2414 // Take a (supported) standard ISD reduction opcode and transform it to a RISCV 2415 // reduction opcode. Note that this returns a vector type, which must be 2416 // further processed to access the scalar result in element 0. 2417 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, 2418 SelectionDAG &DAG) const { 2419 SDLoc DL(Op); 2420 assert(Op.getValueType().isSimple() && 2421 Op.getOperand(0).getValueType().isSimple() && 2422 "Unexpected vector-reduce lowering"); 2423 MVT VecVT = Op.getOperand(0).getSimpleValueType(); 2424 MVT VecEltVT = VecVT.getVectorElementType(); 2425 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode()); 2426 MVT M1VT = getLMUL1VT(VecVT); 2427 SDValue NeutralElem = DAG.getNeutralElement( 2428 ISD::getVecReduceBaseOpcode(Op.getOpcode()), DL, VecEltVT, SDNodeFlags()); 2429 SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem); 2430 SDValue Reduction = 2431 DAG.getNode(RVVOpcode, DL, M1VT, Op.getOperand(0), IdentitySplat); 2432 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 2433 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 2434 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 2435 } 2436 2437 // Given a reduction op, this function returns the matching reduction opcode, 2438 // the vector SDValue and the scalar SDValue required to lower this to a 2439 // RISCVISD node. 
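// For example (assuming f32 elements):
//   (vecreduce_fadd vec)            -> (VECREDUCE_FADD, vec, +0.0)
//   (vecreduce_seq_fadd start, vec) -> (VECREDUCE_SEQ_FADD, vec, start)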
2440 static std::tuple<unsigned, SDValue, SDValue> 2441 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { 2442 SDLoc DL(Op); 2443 switch (Op.getOpcode()) { 2444 default: 2445 llvm_unreachable("Unhandled reduction"); 2446 case ISD::VECREDUCE_FADD: 2447 return std::make_tuple(RISCVISD::VECREDUCE_FADD, Op.getOperand(0), 2448 DAG.getConstantFP(0.0, DL, EltVT)); 2449 case ISD::VECREDUCE_SEQ_FADD: 2450 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD, Op.getOperand(1), 2451 Op.getOperand(0)); 2452 } 2453 } 2454 2455 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, 2456 SelectionDAG &DAG) const { 2457 SDLoc DL(Op); 2458 MVT VecEltVT = Op.getSimpleValueType(); 2459 2460 unsigned RVVOpcode; 2461 SDValue VectorVal, ScalarVal; 2462 std::tie(RVVOpcode, VectorVal, ScalarVal) = 2463 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); 2464 2465 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); 2466 SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal); 2467 SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat); 2468 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 2469 DAG.getConstant(0, DL, Subtarget.getXLenVT())); 2470 } 2471 2472 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, 2473 SelectionDAG &DAG) const { 2474 SDValue Vec = Op.getOperand(0); 2475 SDValue SubVec = Op.getOperand(1); 2476 MVT VecVT = Vec.getSimpleValueType(); 2477 MVT SubVecVT = SubVec.getSimpleValueType(); 2478 2479 SDLoc DL(Op); 2480 MVT XLenVT = Subtarget.getXLenVT(); 2481 unsigned OrigIdx = Op.getConstantOperandVal(2); 2482 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2483 2484 // We don't have the ability to slide mask vectors up indexed by their i1 2485 // elements; the smallest we can do is i8. Often we are able to bitcast to 2486 // equivalent i8 vectors. Note that when inserting a fixed-length vector 2487 // into a scalable one, we might not necessarily have enough scalable 2488 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid. 2489 if (SubVecVT.getVectorElementType() == MVT::i1 && 2490 (OrigIdx != 0 || !Vec.isUndef())) { 2491 if (VecVT.getVectorMinNumElements() >= 8 && 2492 SubVecVT.getVectorMinNumElements() >= 8) { 2493 assert(OrigIdx % 8 == 0 && "Invalid index"); 2494 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 2495 SubVecVT.getVectorMinNumElements() % 8 == 0 && 2496 "Unexpected mask vector lowering"); 2497 OrigIdx /= 8; 2498 SubVecVT = 2499 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 2500 SubVecVT.isScalableVector()); 2501 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 2502 VecVT.isScalableVector()); 2503 Vec = DAG.getBitcast(VecVT, Vec); 2504 SubVec = DAG.getBitcast(SubVecVT, SubVec); 2505 } else { 2506 // We can't slide this mask vector up indexed by its i1 elements. 2507 // This poses a problem when we wish to insert a scalable vector which 2508 // can't be re-expressed as a larger type. Just choose the slow path and 2509 // extend to a larger type, then truncate back down. 
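      // That is: zero-extend both vectors to i8 elements, perform the insert
      // there, then compare the result against zero to recover the i1 mask.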
2510 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); 2511 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); 2512 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); 2513 SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec); 2514 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec, 2515 Op.getOperand(2)); 2516 SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT); 2517 return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE); 2518 } 2519 } 2520 2521 // If the subvector vector is a fixed-length type, we cannot use subregister 2522 // manipulation to simplify the codegen; we don't know which register of a 2523 // LMUL group contains the specific subvector as we only know the minimum 2524 // register size. Therefore we must slide the vector group up the full 2525 // amount. 2526 if (SubVecVT.isFixedLengthVector()) { 2527 if (OrigIdx == 0 && Vec.isUndef()) 2528 return Op; 2529 MVT ContainerVT = VecVT; 2530 if (VecVT.isFixedLengthVector()) { 2531 ContainerVT = getContainerForFixedLengthVector(VecVT); 2532 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2533 } 2534 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT, 2535 DAG.getUNDEF(ContainerVT), SubVec, 2536 DAG.getConstant(0, DL, XLenVT)); 2537 SDValue Mask = 2538 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 2539 // Set the vector length to only the number of elements we care about. Note 2540 // that for slideup this includes the offset. 2541 SDValue VL = 2542 DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT); 2543 SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 2544 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec, 2545 SubVec, SlideupAmt, Mask, VL); 2546 if (!VecVT.isFixedLengthVector()) 2547 return Slideup; 2548 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget); 2549 } 2550 2551 unsigned SubRegIdx, RemIdx; 2552 std::tie(SubRegIdx, RemIdx) = 2553 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 2554 VecVT, SubVecVT, OrigIdx, TRI); 2555 2556 RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT); 2557 bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 || 2558 SubVecLMUL == RISCVVLMUL::LMUL_F4 || 2559 SubVecLMUL == RISCVVLMUL::LMUL_F8; 2560 2561 // 1. If the Idx has been completely eliminated and this subvector's size is 2562 // a vector register or a multiple thereof, or the surrounding elements are 2563 // undef, then this is a subvector insert which naturally aligns to a vector 2564 // register. These can easily be handled using subregister manipulation. 2565 // 2. If the subvector is smaller than a vector register, then the insertion 2566 // must preserve the undisturbed elements of the register. We do this by 2567 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type 2568 // (which resolves to a subregister copy), performing a VSLIDEUP to place the 2569 // subvector within the vector register, and an INSERT_SUBVECTOR of that 2570 // LMUL=1 type back into the larger vector (resolving to another subregister 2571 // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type 2572 // to avoid allocating a large register group to hold our subvector. 
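  // Roughly, for a non-register-aligned index this emits:
  //   (insert_subvector vec,
  //      (vslideup (extract_subvector vec, AlignedIdx), subvec, RemIdx),
  //      AlignedIdx)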
2573 if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef())) 2574 return Op; 2575 2576 // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements 2577 // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy 2578 // (in our case undisturbed). This means we can set up a subvector insertion 2579 // where OFFSET is the insertion offset, and the VL is the OFFSET plus the 2580 // size of the subvector. 2581 MVT InterSubVT = VecVT; 2582 SDValue AlignedExtract = Vec; 2583 unsigned AlignedIdx = OrigIdx - RemIdx; 2584 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 2585 InterSubVT = getLMUL1VT(VecVT); 2586 // Extract a subvector equal to the nearest full vector register type. This 2587 // should resolve to a EXTRACT_SUBREG instruction. 2588 AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 2589 DAG.getConstant(AlignedIdx, DL, XLenVT)); 2590 } 2591 2592 SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT); 2593 // For scalable vectors this must be further multiplied by vscale. 2594 SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt); 2595 2596 SDValue Mask, VL; 2597 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget); 2598 2599 // Construct the vector length corresponding to RemIdx + length(SubVecVT). 2600 VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT); 2601 VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL); 2602 VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL); 2603 2604 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT, 2605 DAG.getUNDEF(InterSubVT), SubVec, 2606 DAG.getConstant(0, DL, XLenVT)); 2607 2608 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT, 2609 AlignedExtract, SubVec, SlideupAmt, Mask, VL); 2610 2611 // If required, insert this subvector back into the correct vector register. 2612 // This should resolve to an INSERT_SUBREG instruction. 2613 if (VecVT.bitsGT(InterSubVT)) 2614 Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup, 2615 DAG.getConstant(AlignedIdx, DL, XLenVT)); 2616 2617 // We might have bitcast from a mask type: cast back to the original type if 2618 // required. 2619 return DAG.getBitcast(Op.getSimpleValueType(), Slideup); 2620 } 2621 2622 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op, 2623 SelectionDAG &DAG) const { 2624 SDValue Vec = Op.getOperand(0); 2625 MVT SubVecVT = Op.getSimpleValueType(); 2626 MVT VecVT = Vec.getSimpleValueType(); 2627 2628 SDLoc DL(Op); 2629 MVT XLenVT = Subtarget.getXLenVT(); 2630 unsigned OrigIdx = Op.getConstantOperandVal(1); 2631 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 2632 2633 // We don't have the ability to slide mask vectors down indexed by their i1 2634 // elements; the smallest we can do is i8. Often we are able to bitcast to 2635 // equivalent i8 vectors. Note that when extracting a fixed-length vector 2636 // from a scalable one, we might not necessarily have enough scalable 2637 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid. 
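  // For example, extracting from nxv64i1 can instead be done on nxv8i8, with
  // the element index divided by 8 accordingly.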
2638 if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) { 2639 if (VecVT.getVectorMinNumElements() >= 8 && 2640 SubVecVT.getVectorMinNumElements() >= 8) { 2641 assert(OrigIdx % 8 == 0 && "Invalid index"); 2642 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 2643 SubVecVT.getVectorMinNumElements() % 8 == 0 && 2644 "Unexpected mask vector lowering"); 2645 OrigIdx /= 8; 2646 SubVecVT = 2647 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 2648 SubVecVT.isScalableVector()); 2649 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 2650 VecVT.isScalableVector()); 2651 Vec = DAG.getBitcast(VecVT, Vec); 2652 } else { 2653 // We can't slide this mask vector down, indexed by its i1 elements. 2654 // This poses a problem when we wish to extract a scalable vector which 2655 // can't be re-expressed as a larger type. Just choose the slow path and 2656 // extend to a larger type, then truncate back down. 2657 // TODO: We could probably improve this when extracting certain fixed 2658 // from fixed, where we can extract as i8 and shift the correct element 2659 // right to reach the desired subvector? 2660 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8); 2661 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8); 2662 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec); 2663 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec, 2664 Op.getOperand(1)); 2665 SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT); 2666 return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE); 2667 } 2668 } 2669 2670 // If the subvector vector is a fixed-length type, we cannot use subregister 2671 // manipulation to simplify the codegen; we don't know which register of a 2672 // LMUL group contains the specific subvector as we only know the minimum 2673 // register size. Therefore we must slide the vector group down the full 2674 // amount. 2675 if (SubVecVT.isFixedLengthVector()) { 2676 // With an index of 0 this is a cast-like subvector, which can be performed 2677 // with subregister operations. 2678 if (OrigIdx == 0) 2679 return Op; 2680 MVT ContainerVT = VecVT; 2681 if (VecVT.isFixedLengthVector()) { 2682 ContainerVT = getContainerForFixedLengthVector(VecVT); 2683 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 2684 } 2685 SDValue Mask = 2686 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first; 2687 // Set the vector length to only the number of elements we care about. This 2688 // avoids sliding down elements we're going to discard straight away. 2689 SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT); 2690 SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT); 2691 SDValue Slidedown = 2692 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 2693 DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL); 2694 // Now we can use a cast-like subvector extract to get the result. 2695 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, 2696 DAG.getConstant(0, DL, XLenVT)); 2697 } 2698 2699 unsigned SubRegIdx, RemIdx; 2700 std::tie(SubRegIdx, RemIdx) = 2701 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs( 2702 VecVT, SubVecVT, OrigIdx, TRI); 2703 2704 // If the Idx has been completely eliminated then this is a subvector extract 2705 // which naturally aligns to a vector register. These can easily be handled 2706 // using subregister manipulation. 
2707 if (RemIdx == 0) 2708 return Op; 2709 2710 // Else we must shift our vector register directly to extract the subvector. 2711 // Do this using VSLIDEDOWN. 2712 2713 // If the vector type is an LMUL-group type, extract a subvector equal to the 2714 // nearest full vector register type. This should resolve to a EXTRACT_SUBREG 2715 // instruction. 2716 MVT InterSubVT = VecVT; 2717 if (VecVT.bitsGT(getLMUL1VT(VecVT))) { 2718 InterSubVT = getLMUL1VT(VecVT); 2719 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec, 2720 DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT)); 2721 } 2722 2723 // Slide this vector register down by the desired number of elements in order 2724 // to place the desired subvector starting at element 0. 2725 SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT); 2726 // For scalable vectors this must be further multiplied by vscale. 2727 SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt); 2728 2729 SDValue Mask, VL; 2730 std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget); 2731 SDValue Slidedown = 2732 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT, 2733 DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL); 2734 2735 // Now the vector is in the right position, extract our final subvector. This 2736 // should resolve to a COPY. 2737 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown, 2738 DAG.getConstant(0, DL, XLenVT)); 2739 2740 // We might have bitcast from a mask type: cast back to the original type if 2741 // required. 2742 return DAG.getBitcast(Op.getSimpleValueType(), Slidedown); 2743 } 2744 2745 SDValue 2746 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op, 2747 SelectionDAG &DAG) const { 2748 auto *Load = cast<LoadSDNode>(Op); 2749 2750 SDLoc DL(Op); 2751 MVT VT = Op.getSimpleValueType(); 2752 MVT ContainerVT = getContainerForFixedLengthVector(VT); 2753 2754 SDValue VL = 2755 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 2756 2757 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 2758 SDValue NewLoad = DAG.getMemIntrinsicNode( 2759 RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL}, 2760 Load->getMemoryVT(), Load->getMemOperand()); 2761 2762 SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget); 2763 return DAG.getMergeValues({Result, Load->getChain()}, DL); 2764 } 2765 2766 SDValue 2767 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op, 2768 SelectionDAG &DAG) const { 2769 auto *Store = cast<StoreSDNode>(Op); 2770 2771 SDLoc DL(Op); 2772 MVT VT = Store->getValue().getSimpleValueType(); 2773 2774 // FIXME: We probably need to zero any extra bits in a byte for mask stores. 2775 // This is tricky to do. 
2776 2777 MVT ContainerVT = getContainerForFixedLengthVector(VT); 2778 2779 SDValue VL = 2780 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 2781 2782 SDValue NewValue = 2783 convertToScalableVector(ContainerVT, Store->getValue(), DAG, Subtarget); 2784 return DAG.getMemIntrinsicNode( 2785 RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other), 2786 {Store->getChain(), NewValue, Store->getBasePtr(), VL}, 2787 Store->getMemoryVT(), Store->getMemOperand()); 2788 } 2789 2790 SDValue 2791 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, 2792 SelectionDAG &DAG) const { 2793 MVT InVT = Op.getOperand(0).getSimpleValueType(); 2794 MVT ContainerVT = getContainerForFixedLengthVector(InVT); 2795 2796 MVT VT = Op.getSimpleValueType(); 2797 2798 SDValue Op1 = 2799 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 2800 SDValue Op2 = 2801 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 2802 2803 SDLoc DL(Op); 2804 SDValue VL = 2805 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 2806 2807 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2808 2809 bool Invert = false; 2810 Optional<unsigned> LogicOpc; 2811 if (ContainerVT.isFloatingPoint()) { 2812 bool Swap = false; 2813 switch (CC) { 2814 default: 2815 break; 2816 case ISD::SETULE: 2817 case ISD::SETULT: 2818 Swap = true; 2819 LLVM_FALLTHROUGH; 2820 case ISD::SETUGE: 2821 case ISD::SETUGT: 2822 CC = getSetCCInverse(CC, ContainerVT); 2823 Invert = true; 2824 break; 2825 case ISD::SETOGE: 2826 case ISD::SETOGT: 2827 case ISD::SETGE: 2828 case ISD::SETGT: 2829 Swap = true; 2830 break; 2831 case ISD::SETUEQ: 2832 // Use !((OLT Op1, Op2) || (OLT Op2, Op1)) 2833 Invert = true; 2834 LogicOpc = RISCVISD::VMOR_VL; 2835 CC = ISD::SETOLT; 2836 break; 2837 case ISD::SETONE: 2838 // Use ((OLT Op1, Op2) || (OLT Op2, Op1)) 2839 LogicOpc = RISCVISD::VMOR_VL; 2840 CC = ISD::SETOLT; 2841 break; 2842 case ISD::SETO: 2843 // Use (OEQ Op1, Op1) && (OEQ Op2, Op2) 2844 LogicOpc = RISCVISD::VMAND_VL; 2845 CC = ISD::SETOEQ; 2846 break; 2847 case ISD::SETUO: 2848 // Use (UNE Op1, Op1) || (UNE Op2, Op2) 2849 LogicOpc = RISCVISD::VMOR_VL; 2850 CC = ISD::SETUNE; 2851 break; 2852 } 2853 2854 if (Swap) { 2855 CC = getSetCCSwappedOperands(CC); 2856 std::swap(Op1, Op2); 2857 } 2858 } 2859 2860 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2861 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2862 2863 // There are 3 cases we need to emit. 2864 // 1. For (OEQ Op1, Op1) && (OEQ Op2, Op2) or (UNE Op1, Op1) || (UNE Op2, Op2) 2865 // we need to compare each operand with itself. 2866 // 2. For (OLT Op1, Op2) || (OLT Op2, Op1) we need to compare Op1 and Op2 in 2867 // both orders. 2868 // 3. For any other case we just need one compare with Op1 and Op2. 
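  // For example, SETUEQ falls into case 2: it is emitted as two SETOLT
  // compares in both operand orders, combined with VMOR, and then inverted by
  // XORing with an all-ones mask.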
2869 SDValue Cmp; 2870 if (LogicOpc && (CC == ISD::SETOEQ || CC == ISD::SETUNE)) { 2871 Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op1, 2872 DAG.getCondCode(CC), Mask, VL); 2873 SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op2, 2874 DAG.getCondCode(CC), Mask, VL); 2875 Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); 2876 } else { 2877 Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, 2878 DAG.getCondCode(CC), Mask, VL); 2879 if (LogicOpc) { 2880 SDValue Cmp2 = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op2, Op1, 2881 DAG.getCondCode(CC), Mask, VL); 2882 Cmp = DAG.getNode(*LogicOpc, DL, MaskVT, Cmp, Cmp2, VL); 2883 } 2884 } 2885 2886 if (Invert) { 2887 SDValue AllOnes = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 2888 Cmp = DAG.getNode(RISCVISD::VMXOR_VL, DL, MaskVT, Cmp, AllOnes, VL); 2889 } 2890 2891 return convertFromScalableVector(VT, Cmp, DAG, Subtarget); 2892 } 2893 2894 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( 2895 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { 2896 MVT VT = Op.getSimpleValueType(); 2897 2898 if (VT.getVectorElementType() == MVT::i1) 2899 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); 2900 2901 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); 2902 } 2903 2904 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV( 2905 SDValue Op, SelectionDAG &DAG) const { 2906 MVT VT = Op.getSimpleValueType(); 2907 MVT ContainerVT = getContainerForFixedLengthVector(VT); 2908 2909 MVT I1ContainerVT = 2910 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 2911 2912 SDValue CC = 2913 convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget); 2914 SDValue Op1 = 2915 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 2916 SDValue Op2 = 2917 convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget); 2918 2919 SDLoc DL(Op); 2920 SDValue Mask, VL; 2921 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2922 2923 SDValue Select = 2924 DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL); 2925 2926 return convertFromScalableVector(VT, Select, DAG, Subtarget); 2927 } 2928 2929 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG, 2930 unsigned NewOpc, 2931 bool HasMask) const { 2932 MVT VT = Op.getSimpleValueType(); 2933 assert(useRVVForFixedLengthVectorVT(VT) && 2934 "Only expected to lower fixed length vector operation!"); 2935 MVT ContainerVT = getContainerForFixedLengthVector(VT); 2936 2937 // Create list of operands by converting existing ones to scalable types. 2938 SmallVector<SDValue, 6> Ops; 2939 for (const SDValue &V : Op->op_values()) { 2940 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 2941 2942 // Pass through non-vector operands. 2943 if (!V.getValueType().isVector()) { 2944 Ops.push_back(V); 2945 continue; 2946 } 2947 2948 // "cast" fixed length vector to a scalable vector. 
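    // This "cast" is not expected to generate any real data movement: the
    // fixed-length operand is simply placed at element 0 of an otherwise
    // undef scalable container (e.g. a v4i32 value living in the low part of
    // a scalable i32 container), so it should fold to a subregister copy.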
2949 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && 2950 "Only fixed length vectors are supported!"); 2951 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 2952 } 2953 2954 SDLoc DL(Op); 2955 SDValue Mask, VL; 2956 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 2957 if (HasMask) 2958 Ops.push_back(Mask); 2959 Ops.push_back(VL); 2960 2961 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); 2962 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); 2963 } 2964 2965 // Returns the opcode of the target-specific SDNode that implements the 32-bit 2966 // form of the given Opcode. 2967 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) { 2968 switch (Opcode) { 2969 default: 2970 llvm_unreachable("Unexpected opcode"); 2971 case ISD::SHL: 2972 return RISCVISD::SLLW; 2973 case ISD::SRA: 2974 return RISCVISD::SRAW; 2975 case ISD::SRL: 2976 return RISCVISD::SRLW; 2977 case ISD::SDIV: 2978 return RISCVISD::DIVW; 2979 case ISD::UDIV: 2980 return RISCVISD::DIVUW; 2981 case ISD::UREM: 2982 return RISCVISD::REMUW; 2983 case ISD::ROTL: 2984 return RISCVISD::ROLW; 2985 case ISD::ROTR: 2986 return RISCVISD::RORW; 2987 case RISCVISD::GREVI: 2988 return RISCVISD::GREVIW; 2989 case RISCVISD::GORCI: 2990 return RISCVISD::GORCIW; 2991 } 2992 } 2993 2994 // Converts the given 32-bit operation to a target-specific SelectionDAG node. 2995 // Because i32 isn't a legal type for RV64, these operations would otherwise 2996 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W 2997 // later one because the fact the operation was originally of type i32 is 2998 // lost. 2999 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, 3000 unsigned ExtOpc = ISD::ANY_EXTEND) { 3001 SDLoc DL(N); 3002 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 3003 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); 3004 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); 3005 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 3006 // ReplaceNodeResults requires we maintain the same type for the return value. 3007 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); 3008 } 3009 3010 // Converts the given 32-bit operation to a i64 operation with signed extension 3011 // semantic to reduce the signed extension instructions. 3012 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { 3013 SDLoc DL(N); 3014 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3015 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 3016 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); 3017 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 3018 DAG.getValueType(MVT::i32)); 3019 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 3020 } 3021 3022 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 3023 SmallVectorImpl<SDValue> &Results, 3024 SelectionDAG &DAG) const { 3025 SDLoc DL(N); 3026 switch (N->getOpcode()) { 3027 default: 3028 llvm_unreachable("Don't know how to custom type legalize this operation!"); 3029 case ISD::STRICT_FP_TO_SINT: 3030 case ISD::STRICT_FP_TO_UINT: 3031 case ISD::FP_TO_SINT: 3032 case ISD::FP_TO_UINT: { 3033 bool IsStrict = N->isStrictFPOpcode(); 3034 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3035 "Unexpected custom legalisation"); 3036 SDValue Op0 = IsStrict ? 
N->getOperand(1) : N->getOperand(0); 3037 // If the FP type needs to be softened, emit a library call using the 'si' 3038 // version. If we left it to default legalization we'd end up with 'di'. If 3039 // the FP type doesn't need to be softened just let generic type 3040 // legalization promote the result type. 3041 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != 3042 TargetLowering::TypeSoftenFloat) 3043 return; 3044 RTLIB::Libcall LC; 3045 if (N->getOpcode() == ISD::FP_TO_SINT || 3046 N->getOpcode() == ISD::STRICT_FP_TO_SINT) 3047 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); 3048 else 3049 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); 3050 MakeLibCallOptions CallOptions; 3051 EVT OpVT = Op0.getValueType(); 3052 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); 3053 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); 3054 SDValue Result; 3055 std::tie(Result, Chain) = 3056 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); 3057 Results.push_back(Result); 3058 if (IsStrict) 3059 Results.push_back(Chain); 3060 break; 3061 } 3062 case ISD::READCYCLECOUNTER: { 3063 assert(!Subtarget.is64Bit() && 3064 "READCYCLECOUNTER only has custom type legalization on riscv32"); 3065 3066 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 3067 SDValue RCW = 3068 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); 3069 3070 Results.push_back( 3071 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); 3072 Results.push_back(RCW.getValue(2)); 3073 break; 3074 } 3075 case ISD::ADD: 3076 case ISD::SUB: 3077 case ISD::MUL: 3078 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3079 "Unexpected custom legalisation"); 3080 if (N->getOperand(1).getOpcode() == ISD::Constant) 3081 return; 3082 Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); 3083 break; 3084 case ISD::SHL: 3085 case ISD::SRA: 3086 case ISD::SRL: 3087 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3088 "Unexpected custom legalisation"); 3089 if (N->getOperand(1).getOpcode() == ISD::Constant) 3090 return; 3091 Results.push_back(customLegalizeToWOp(N, DAG)); 3092 break; 3093 case ISD::ROTL: 3094 case ISD::ROTR: 3095 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3096 "Unexpected custom legalisation"); 3097 Results.push_back(customLegalizeToWOp(N, DAG)); 3098 break; 3099 case ISD::SDIV: 3100 case ISD::UDIV: 3101 case ISD::UREM: { 3102 MVT VT = N->getSimpleValueType(0); 3103 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 3104 Subtarget.is64Bit() && Subtarget.hasStdExtM() && 3105 "Unexpected custom legalisation"); 3106 if (N->getOperand(0).getOpcode() == ISD::Constant || 3107 N->getOperand(1).getOpcode() == ISD::Constant) 3108 return; 3109 3110 // If the input is i32, use ANY_EXTEND since the W instructions don't read 3111 // the upper 32 bits. For other types we need to sign or zero extend 3112 // based on the opcode. 3113 unsigned ExtOpc = ISD::ANY_EXTEND; 3114 if (VT != MVT::i32) 3115 ExtOpc = N->getOpcode() == ISD::SDIV ? 
ISD::SIGN_EXTEND 3116 : ISD::ZERO_EXTEND; 3117 3118 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); 3119 break; 3120 } 3121 case ISD::BITCAST: { 3122 assert(((N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3123 Subtarget.hasStdExtF()) || 3124 (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh())) && 3125 "Unexpected custom legalisation"); 3126 SDValue Op0 = N->getOperand(0); 3127 if (N->getValueType(0) == MVT::i16 && Subtarget.hasStdExtZfh()) { 3128 if (Op0.getValueType() != MVT::f16) 3129 return; 3130 SDValue FPConv = 3131 DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, Subtarget.getXLenVT(), Op0); 3132 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv)); 3133 } else if (N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3134 Subtarget.hasStdExtF()) { 3135 if (Op0.getValueType() != MVT::f32) 3136 return; 3137 SDValue FPConv = 3138 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0); 3139 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv)); 3140 } 3141 break; 3142 } 3143 case RISCVISD::GREVI: 3144 case RISCVISD::GORCI: { 3145 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3146 "Unexpected custom legalisation"); 3147 // This is similar to customLegalizeToWOp, except that we pass the second 3148 // operand (a TargetConstant) straight through: it is already of type 3149 // XLenVT. 3150 SDLoc DL(N); 3151 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 3152 SDValue NewOp0 = 3153 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3154 SDValue NewRes = 3155 DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, N->getOperand(1)); 3156 // ReplaceNodeResults requires we maintain the same type for the return 3157 // value. 3158 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 3159 break; 3160 } 3161 case RISCVISD::SHFLI: { 3162 // There is no SHFLIW instruction, but we can just promote the operation. 3163 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3164 "Unexpected custom legalisation"); 3165 SDLoc DL(N); 3166 SDValue NewOp0 = 3167 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3168 SDValue NewRes = 3169 DAG.getNode(RISCVISD::SHFLI, DL, MVT::i64, NewOp0, N->getOperand(1)); 3170 // ReplaceNodeResults requires we maintain the same type for the return 3171 // value. 3172 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 3173 break; 3174 } 3175 case ISD::BSWAP: 3176 case ISD::BITREVERSE: { 3177 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3178 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 3179 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 3180 N->getOperand(0)); 3181 unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24; 3182 SDValue GREVIW = DAG.getNode(RISCVISD::GREVIW, DL, MVT::i64, NewOp0, 3183 DAG.getTargetConstant(Imm, DL, 3184 Subtarget.getXLenVT())); 3185 // ReplaceNodeResults requires we maintain the same type for the return 3186 // value. 
3187 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW)); 3188 break; 3189 } 3190 case ISD::FSHL: 3191 case ISD::FSHR: { 3192 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 3193 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 3194 SDValue NewOp0 = 3195 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 3196 SDValue NewOp1 = 3197 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 3198 SDValue NewOp2 = 3199 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 3200 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 3201 // Mask the shift amount to 5 bits. 3202 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2, 3203 DAG.getConstant(0x1f, DL, MVT::i64)); 3204 unsigned Opc = 3205 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW; 3206 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2); 3207 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 3208 break; 3209 } 3210 case ISD::EXTRACT_VECTOR_ELT: { 3211 // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element 3212 // type is illegal (currently only vXi64 RV32). 3213 // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are 3214 // transferred to the destination register. We issue two of these from the 3215 // upper- and lower- halves of the SEW-bit vector element, slid down to the 3216 // first element. 3217 SDLoc DL(N); 3218 SDValue Vec = N->getOperand(0); 3219 SDValue Idx = N->getOperand(1); 3220 3221 // The vector type hasn't been legalized yet so we can't issue target 3222 // specific nodes if it needs legalization. 3223 // FIXME: We would manually legalize if it's important. 3224 if (!isTypeLegal(Vec.getValueType())) 3225 return; 3226 3227 MVT VecVT = Vec.getSimpleValueType(); 3228 3229 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && 3230 VecVT.getVectorElementType() == MVT::i64 && 3231 "Unexpected EXTRACT_VECTOR_ELT legalization"); 3232 3233 // If this is a fixed vector, we need to convert it to a scalable vector. 3234 MVT ContainerVT = VecVT; 3235 if (VecVT.isFixedLengthVector()) { 3236 ContainerVT = getContainerForFixedLengthVector(VecVT); 3237 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 3238 } 3239 3240 MVT XLenVT = Subtarget.getXLenVT(); 3241 3242 // Use a VL of 1 to avoid processing more elements than we need. 3243 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount()); 3244 SDValue VL = DAG.getConstant(1, DL, XLenVT); 3245 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL); 3246 3247 // Unless the index is known to be 0, we must slide the vector down to get 3248 // the desired element into index 0. 3249 if (!isNullConstant(Idx)) { 3250 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 3251 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 3252 } 3253 3254 // Extract the lower XLEN bits of the correct vector element. 3255 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 3256 3257 // To extract the upper XLEN bits of the vector element, shift the first 3258 // element right by 32 bits and re-extract the lower XLEN bits. 
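    // For example, if the selected element contains 0x0123456789ABCDEF on
    // RV32, the first vmv.x.s yields the low half 0x89ABCDEF (EltLo); after
    // shifting the element right by 32, the second vmv.x.s yields 0x01234567
    // (EltHi), and the two halves are reassembled with BUILD_PAIR below.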
3259 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 3260 DAG.getConstant(32, DL, XLenVT), VL); 3261 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, 3262 ThirtyTwoV, Mask, VL); 3263 3264 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 3265 3266 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 3267 break; 3268 } 3269 case ISD::INTRINSIC_WO_CHAIN: { 3270 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 3271 switch (IntNo) { 3272 default: 3273 llvm_unreachable( 3274 "Don't know how to custom type legalize this intrinsic!"); 3275 case Intrinsic::riscv_vmv_x_s: { 3276 EVT VT = N->getValueType(0); 3277 assert((VT == MVT::i8 || VT == MVT::i16 || 3278 (Subtarget.is64Bit() && VT == MVT::i32)) && 3279 "Unexpected custom legalisation!"); 3280 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL, 3281 Subtarget.getXLenVT(), N->getOperand(1)); 3282 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract)); 3283 break; 3284 } 3285 } 3286 break; 3287 } 3288 case ISD::VECREDUCE_ADD: 3289 case ISD::VECREDUCE_AND: 3290 case ISD::VECREDUCE_OR: 3291 case ISD::VECREDUCE_XOR: 3292 case ISD::VECREDUCE_SMAX: 3293 case ISD::VECREDUCE_UMAX: 3294 case ISD::VECREDUCE_SMIN: 3295 case ISD::VECREDUCE_UMIN: 3296 // The custom-lowering for these nodes returns a vector whose first element 3297 // is the result of the reduction. Extract its first element and let the 3298 // legalization for EXTRACT_VECTOR_ELT do the rest of the job. 3299 Results.push_back(lowerVECREDUCE(SDValue(N, 0), DAG)); 3300 break; 3301 } 3302 } 3303 3304 // A structure to hold one of the bit-manipulation patterns below. Together, a 3305 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: 3306 // (or (and (shl x, 1), 0xAAAAAAAA), 3307 // (and (srl x, 1), 0x55555555)) 3308 struct RISCVBitmanipPat { 3309 SDValue Op; 3310 unsigned ShAmt; 3311 bool IsSHL; 3312 3313 bool formsPairWith(const RISCVBitmanipPat &Other) const { 3314 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; 3315 } 3316 }; 3317 3318 // Matches patterns of the form 3319 // (and (shl x, C2), (C1 << C2)) 3320 // (and (srl x, C2), C1) 3321 // (shl (and x, C1), C2) 3322 // (srl (and x, (C1 << C2)), C2) 3323 // Where C2 is a power of 2 and C1 has at least that many leading zeroes. 3324 // The expected masks for each shift amount are specified in BitmanipMasks where 3325 // BitmanipMasks[log2(C2)] specifies the expected C1 value. 3326 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether 3327 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible 3328 // XLen is 64. 3329 static Optional<RISCVBitmanipPat> 3330 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { 3331 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && 3332 "Unexpected number of masks"); 3333 Optional<uint64_t> Mask; 3334 // Optionally consume a mask around the shift operation. 3335 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) { 3336 Mask = Op.getConstantOperandVal(1); 3337 Op = Op.getOperand(0); 3338 } 3339 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL) 3340 return None; 3341 bool IsSHL = Op.getOpcode() == ISD::SHL; 3342 3343 if (!isa<ConstantSDNode>(Op.getOperand(1))) 3344 return None; 3345 uint64_t ShAmt = Op.getConstantOperandVal(1); 3346 3347 unsigned Width = Op.getValueType() == MVT::i64 ? 
64 : 32; 3348 if (ShAmt >= Width && !isPowerOf2_64(ShAmt)) 3349 return None; 3350 // If we don't have enough masks for 64 bit, then we must be trying to 3351 // match SHFL so we're only allowed to shift 1/4 of the width. 3352 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2)) 3353 return None; 3354 3355 SDValue Src = Op.getOperand(0); 3356 3357 // The expected mask is shifted left when the AND is found around SHL 3358 // patterns. 3359 // ((x >> 1) & 0x55555555) 3360 // ((x << 1) & 0xAAAAAAAA) 3361 bool SHLExpMask = IsSHL; 3362 3363 if (!Mask) { 3364 // Sometimes LLVM keeps the mask as an operand of the shift, typically when 3365 // the mask is all ones: consume that now. 3366 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) { 3367 Mask = Src.getConstantOperandVal(1); 3368 Src = Src.getOperand(0); 3369 // The expected mask is now in fact shifted left for SRL, so reverse the 3370 // decision. 3371 // ((x & 0xAAAAAAAA) >> 1) 3372 // ((x & 0x55555555) << 1) 3373 SHLExpMask = !SHLExpMask; 3374 } else { 3375 // Use a default shifted mask of all-ones if there's no AND, truncated 3376 // down to the expected width. This simplifies the logic later on. 3377 Mask = maskTrailingOnes<uint64_t>(Width); 3378 *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt); 3379 } 3380 } 3381 3382 unsigned MaskIdx = Log2_32(ShAmt); 3383 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 3384 3385 if (SHLExpMask) 3386 ExpMask <<= ShAmt; 3387 3388 if (Mask != ExpMask) 3389 return None; 3390 3391 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL}; 3392 } 3393 3394 // Matches any of the following bit-manipulation patterns: 3395 // (and (shl x, 1), (0x55555555 << 1)) 3396 // (and (srl x, 1), 0x55555555) 3397 // (shl (and x, 0x55555555), 1) 3398 // (srl (and x, (0x55555555 << 1)), 1) 3399 // where the shift amount and mask may vary thus: 3400 // [1] = 0x55555555 / 0xAAAAAAAA 3401 // [2] = 0x33333333 / 0xCCCCCCCC 3402 // [4] = 0x0F0F0F0F / 0xF0F0F0F0 3403 // [8] = 0x00FF00FF / 0xFF00FF00 3404 // [16] = 0x0000FFFF / 0xFFFFFFFF 3405 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64) 3406 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) { 3407 // These are the unshifted masks which we use to match bit-manipulation 3408 // patterns. They may be shifted left in certain circumstances. 3409 static const uint64_t BitmanipMasks[] = { 3410 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 3411 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; 3412 3413 return matchRISCVBitmanipPat(Op, BitmanipMasks); 3414 } 3415 3416 // Match the following pattern as a GREVI(W) operation 3417 // (or (BITMANIP_SHL x), (BITMANIP_SRL x)) 3418 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG, 3419 const RISCVSubtarget &Subtarget) { 3420 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 3421 EVT VT = Op.getValueType(); 3422 3423 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 3424 auto LHS = matchGREVIPat(Op.getOperand(0)); 3425 auto RHS = matchGREVIPat(Op.getOperand(1)); 3426 if (LHS && RHS && LHS->formsPairWith(*RHS)) { 3427 SDLoc DL(Op); 3428 return DAG.getNode( 3429 RISCVISD::GREVI, DL, VT, LHS->Op, 3430 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); 3431 } 3432 } 3433 return SDValue(); 3434 } 3435 3436 // Matches any the following pattern as a GORCI(W) operation 3437 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2 3438 // 2. 
(or x, (GREVI x, shamt)) if shamt is a power of 2 3439 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x)) 3440 // Note that with the variant of 3., 3441 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x) 3442 // the inner pattern will first be matched as GREVI and then the outer 3443 // pattern will be matched to GORC via the first rule above. 3444 // 4. (or (rotl/rotr x, bitwidth/2), x) 3445 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG, 3446 const RISCVSubtarget &Subtarget) { 3447 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson"); 3448 EVT VT = Op.getValueType(); 3449 3450 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 3451 SDLoc DL(Op); 3452 SDValue Op0 = Op.getOperand(0); 3453 SDValue Op1 = Op.getOperand(1); 3454 3455 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) { 3456 if (Reverse.getOpcode() == RISCVISD::GREVI && Reverse.getOperand(0) == X && 3457 isPowerOf2_32(Reverse.getConstantOperandVal(1))) 3458 return DAG.getNode(RISCVISD::GORCI, DL, VT, X, Reverse.getOperand(1)); 3459 // We can also form GORCI from ROTL/ROTR by half the bitwidth. 3460 if ((Reverse.getOpcode() == ISD::ROTL || 3461 Reverse.getOpcode() == ISD::ROTR) && 3462 Reverse.getOperand(0) == X && 3463 isa<ConstantSDNode>(Reverse.getOperand(1))) { 3464 uint64_t RotAmt = Reverse.getConstantOperandVal(1); 3465 if (RotAmt == (VT.getSizeInBits() / 2)) 3466 return DAG.getNode( 3467 RISCVISD::GORCI, DL, VT, X, 3468 DAG.getTargetConstant(RotAmt, DL, Subtarget.getXLenVT())); 3469 } 3470 return SDValue(); 3471 }; 3472 3473 // Check for either commutable permutation of (or (GREVI x, shamt), x) 3474 if (SDValue V = MatchOROfReverse(Op0, Op1)) 3475 return V; 3476 if (SDValue V = MatchOROfReverse(Op1, Op0)) 3477 return V; 3478 3479 // OR is commutable so canonicalize its OR operand to the left 3480 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR) 3481 std::swap(Op0, Op1); 3482 if (Op0.getOpcode() != ISD::OR) 3483 return SDValue(); 3484 SDValue OrOp0 = Op0.getOperand(0); 3485 SDValue OrOp1 = Op0.getOperand(1); 3486 auto LHS = matchGREVIPat(OrOp0); 3487 // OR is commutable so swap the operands and try again: x might have been 3488 // on the left 3489 if (!LHS) { 3490 std::swap(OrOp0, OrOp1); 3491 LHS = matchGREVIPat(OrOp0); 3492 } 3493 auto RHS = matchGREVIPat(Op1); 3494 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) { 3495 return DAG.getNode( 3496 RISCVISD::GORCI, DL, VT, LHS->Op, 3497 DAG.getTargetConstant(LHS->ShAmt, DL, Subtarget.getXLenVT())); 3498 } 3499 } 3500 return SDValue(); 3501 } 3502 3503 // Matches any of the following bit-manipulation patterns: 3504 // (and (shl x, 1), (0x22222222 << 1)) 3505 // (and (srl x, 1), 0x22222222) 3506 // (shl (and x, 0x22222222), 1) 3507 // (srl (and x, (0x22222222 << 1)), 1) 3508 // where the shift amount and mask may vary thus: 3509 // [1] = 0x22222222 / 0x44444444 3510 // [2] = 0x0C0C0C0C / 0x3C3C3C3C 3511 // [4] = 0x00F000F0 / 0x0F000F00 3512 // [8] = 0x0000FF00 / 0x00FF0000 3513 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64) 3514 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) { 3515 // These are the unshifted masks which we use to match bit-manipulation 3516 // patterns. They may be shifted left in certain circumstances. 
  static const uint64_t BitmanipMasks[] = {
      0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
      0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}

// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
    return SDValue();

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);

  // Or is commutable so canonicalize the second OR to the LHS.
  if (Op0.getOpcode() != ISD::OR)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() != ISD::OR)
    return SDValue();

  // We found an inner OR, so our operands are the operands of the inner OR
  // and the other operand of the outer OR.
  SDValue A = Op0.getOperand(0);
  SDValue B = Op0.getOperand(1);
  SDValue C = Op1;

  auto Match1 = matchSHFLPat(A);
  auto Match2 = matchSHFLPat(B);

  // If neither matched, we failed.
  if (!Match1 && !Match2)
    return SDValue();

  // We had at least one match. If one failed, try the remaining C operand.
  if (!Match1) {
    std::swap(A, C);
    Match1 = matchSHFLPat(A);
    if (!Match1)
      return SDValue();
  } else if (!Match2) {
    std::swap(B, C);
    Match2 = matchSHFLPat(B);
    if (!Match2)
      return SDValue();
  }
  assert(Match1 && Match2);

  // Make sure our matches pair up.
  if (!Match1->formsPairWith(*Match2))
    return SDValue();

  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
  if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
      C.getOperand(0) != Match1->Op)
    return SDValue();

  uint64_t Mask = C.getConstantOperandVal(1);

  static const uint64_t BitmanipMasks[] = {
      0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
      0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
  };

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  unsigned MaskIdx = Log2_32(Match1->ShAmt);
  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  if (Mask != ExpMask)
    return SDValue();

  SDLoc DL(Op);
  return DAG.getNode(
      RISCVISD::SHFLI, DL, VT, Match1->Op,
      DAG.getTargetConstant(Match1->ShAmt, DL, Subtarget.getXLenVT()));
}

// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
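// For example, (GREVI (GREVI x, 1), 2) folds to (GREVI x, 3), and
// (GREVI (GREVI x, 3), 3) folds all the way back to x, while
// (GORCI (GORCI x, 1), 3) folds to (GORCI x, 3): OR-ing the stage amounts
// never cancels earlier stages, it only drops the redundant repetition.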
3603 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) { 3604 unsigned ShAmt1 = N->getConstantOperandVal(1); 3605 SDValue Src = N->getOperand(0); 3606 3607 if (Src.getOpcode() != N->getOpcode()) 3608 return SDValue(); 3609 3610 unsigned ShAmt2 = Src.getConstantOperandVal(1); 3611 Src = Src.getOperand(0); 3612 3613 unsigned CombinedShAmt; 3614 if (N->getOpcode() == RISCVISD::GORCI || N->getOpcode() == RISCVISD::GORCIW) 3615 CombinedShAmt = ShAmt1 | ShAmt2; 3616 else 3617 CombinedShAmt = ShAmt1 ^ ShAmt2; 3618 3619 if (CombinedShAmt == 0) 3620 return Src; 3621 3622 SDLoc DL(N); 3623 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), Src, 3624 DAG.getTargetConstant(CombinedShAmt, DL, 3625 N->getOperand(1).getValueType())); 3626 } 3627 3628 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 3629 DAGCombinerInfo &DCI) const { 3630 SelectionDAG &DAG = DCI.DAG; 3631 3632 switch (N->getOpcode()) { 3633 default: 3634 break; 3635 case RISCVISD::SplitF64: { 3636 SDValue Op0 = N->getOperand(0); 3637 // If the input to SplitF64 is just BuildPairF64 then the operation is 3638 // redundant. Instead, use BuildPairF64's operands directly. 3639 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 3640 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 3641 3642 SDLoc DL(N); 3643 3644 // It's cheaper to materialise two 32-bit integers than to load a double 3645 // from the constant pool and transfer it to integer registers through the 3646 // stack. 3647 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { 3648 APInt V = C->getValueAPF().bitcastToAPInt(); 3649 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); 3650 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); 3651 return DCI.CombineTo(N, Lo, Hi); 3652 } 3653 3654 // This is a target-specific version of a DAGCombine performed in 3655 // DAGCombiner::visitBITCAST. It performs the equivalent of: 3656 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 3657 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 3658 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 3659 !Op0.getNode()->hasOneUse()) 3660 break; 3661 SDValue NewSplitF64 = 3662 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), 3663 Op0.getOperand(0)); 3664 SDValue Lo = NewSplitF64.getValue(0); 3665 SDValue Hi = NewSplitF64.getValue(1); 3666 APInt SignBit = APInt::getSignMask(32); 3667 if (Op0.getOpcode() == ISD::FNEG) { 3668 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, 3669 DAG.getConstant(SignBit, DL, MVT::i32)); 3670 return DCI.CombineTo(N, Lo, NewHi); 3671 } 3672 assert(Op0.getOpcode() == ISD::FABS); 3673 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, 3674 DAG.getConstant(~SignBit, DL, MVT::i32)); 3675 return DCI.CombineTo(N, Lo, NewHi); 3676 } 3677 case RISCVISD::SLLW: 3678 case RISCVISD::SRAW: 3679 case RISCVISD::SRLW: 3680 case RISCVISD::ROLW: 3681 case RISCVISD::RORW: { 3682 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 
3683 SDValue LHS = N->getOperand(0); 3684 SDValue RHS = N->getOperand(1); 3685 APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32); 3686 APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5); 3687 if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) || 3688 SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) { 3689 if (N->getOpcode() != ISD::DELETED_NODE) 3690 DCI.AddToWorklist(N); 3691 return SDValue(N, 0); 3692 } 3693 break; 3694 } 3695 case RISCVISD::FSL: 3696 case RISCVISD::FSR: { 3697 // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read. 3698 SDValue ShAmt = N->getOperand(2); 3699 unsigned BitWidth = ShAmt.getValueSizeInBits(); 3700 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); 3701 APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1); 3702 if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { 3703 if (N->getOpcode() != ISD::DELETED_NODE) 3704 DCI.AddToWorklist(N); 3705 return SDValue(N, 0); 3706 } 3707 break; 3708 } 3709 case RISCVISD::FSLW: 3710 case RISCVISD::FSRW: { 3711 // Only the lower 32 bits of Values and lower 6 bits of shift amount are 3712 // read. 3713 SDValue Op0 = N->getOperand(0); 3714 SDValue Op1 = N->getOperand(1); 3715 SDValue ShAmt = N->getOperand(2); 3716 APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); 3717 APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6); 3718 if (SimplifyDemandedBits(Op0, OpMask, DCI) || 3719 SimplifyDemandedBits(Op1, OpMask, DCI) || 3720 SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) { 3721 if (N->getOpcode() != ISD::DELETED_NODE) 3722 DCI.AddToWorklist(N); 3723 return SDValue(N, 0); 3724 } 3725 break; 3726 } 3727 case RISCVISD::GREVIW: 3728 case RISCVISD::GORCIW: { 3729 // Only the lower 32 bits of the first operand are read 3730 SDValue Op0 = N->getOperand(0); 3731 APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32); 3732 if (SimplifyDemandedBits(Op0, Mask, DCI)) { 3733 if (N->getOpcode() != ISD::DELETED_NODE) 3734 DCI.AddToWorklist(N); 3735 return SDValue(N, 0); 3736 } 3737 3738 return combineGREVI_GORCI(N, DCI.DAG); 3739 } 3740 case RISCVISD::FMV_X_ANYEXTW_RV64: { 3741 SDLoc DL(N); 3742 SDValue Op0 = N->getOperand(0); 3743 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the 3744 // conversion is unnecessary and can be replaced with an ANY_EXTEND 3745 // of the FMV_W_X_RV64 operand. 3746 if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) { 3747 assert(Op0.getOperand(0).getValueType() == MVT::i64 && 3748 "Unexpected value type!"); 3749 return Op0.getOperand(0); 3750 } 3751 3752 // This is a target-specific version of a DAGCombine performed in 3753 // DAGCombiner::visitBITCAST. 
It performs the equivalent of: 3754 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 3755 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 3756 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 3757 !Op0.getNode()->hasOneUse()) 3758 break; 3759 SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, 3760 Op0.getOperand(0)); 3761 APInt SignBit = APInt::getSignMask(32).sext(64); 3762 if (Op0.getOpcode() == ISD::FNEG) 3763 return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV, 3764 DAG.getConstant(SignBit, DL, MVT::i64)); 3765 3766 assert(Op0.getOpcode() == ISD::FABS); 3767 return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV, 3768 DAG.getConstant(~SignBit, DL, MVT::i64)); 3769 } 3770 case RISCVISD::GREVI: 3771 case RISCVISD::GORCI: 3772 return combineGREVI_GORCI(N, DCI.DAG); 3773 case ISD::OR: 3774 if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget)) 3775 return GREV; 3776 if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget)) 3777 return GORC; 3778 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget)) 3779 return SHFL; 3780 break; 3781 case RISCVISD::SELECT_CC: { 3782 // Transform 3783 SDValue LHS = N->getOperand(0); 3784 SDValue RHS = N->getOperand(1); 3785 auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); 3786 if (!ISD::isIntEqualitySetCC(CCVal)) 3787 break; 3788 3789 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> 3790 // (select_cc X, Y, lt, trueV, falseV) 3791 // Sometimes the setcc is introduced after select_cc has been formed. 3792 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 3793 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 3794 // If we're looking for eq 0 instead of ne 0, we need to invert the 3795 // condition. 3796 bool Invert = CCVal == ISD::SETEQ; 3797 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 3798 if (Invert) 3799 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 3800 3801 RHS = LHS.getOperand(1); 3802 LHS = LHS.getOperand(0); 3803 normaliseSetCC(LHS, RHS, CCVal); 3804 3805 SDLoc DL(N); 3806 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 3807 return DAG.getNode( 3808 RISCVISD::SELECT_CC, DL, N->getValueType(0), 3809 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 3810 } 3811 3812 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> 3813 // (select_cc X, Y, eq/ne, trueV, falseV) 3814 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 3815 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), 3816 {LHS.getOperand(0), LHS.getOperand(1), 3817 N->getOperand(2), N->getOperand(3), 3818 N->getOperand(4)}); 3819 // (select_cc X, 1, setne, trueV, falseV) -> 3820 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 3821 // This can occur when legalizing some floating point comparisons. 3822 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 3823 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) { 3824 SDLoc DL(N); 3825 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 3826 SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT()); 3827 RHS = DAG.getConstant(0, DL, LHS.getValueType()); 3828 return DAG.getNode( 3829 RISCVISD::SELECT_CC, DL, N->getValueType(0), 3830 {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)}); 3831 } 3832 3833 break; 3834 } 3835 case ISD::SETCC: { 3836 // (setcc X, 1, setne) -> (setcc X, 0, seteq) if we can prove X is 0/1. 
3837 // Comparing with 0 may allow us to fold into bnez/beqz. 3838 SDValue LHS = N->getOperand(0); 3839 SDValue RHS = N->getOperand(1); 3840 if (LHS.getValueType().isScalableVector()) 3841 break; 3842 auto CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 3843 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 3844 if (isOneConstant(RHS) && ISD::isIntEqualitySetCC(CC) && 3845 DAG.MaskedValueIsZero(LHS, Mask)) { 3846 SDLoc DL(N); 3847 SDValue Zero = DAG.getConstant(0, DL, LHS.getValueType()); 3848 CC = ISD::getSetCCInverse(CC, LHS.getValueType()); 3849 return DAG.getSetCC(DL, N->getValueType(0), LHS, Zero, CC); 3850 } 3851 break; 3852 } 3853 case ISD::FCOPYSIGN: { 3854 EVT VT = N->getValueType(0); 3855 if (!VT.isVector()) 3856 break; 3857 // There is a form of VFSGNJ which injects the negated sign of its second 3858 // operand. Try and bubble any FNEG up after the extend/round to produce 3859 // this optimized pattern. Avoid modifying cases where FP_ROUND and 3860 // TRUNC=1. 3861 SDValue In2 = N->getOperand(1); 3862 // Avoid cases where the extend/round has multiple uses, as duplicating 3863 // those is typically more expensive than removing a fneg. 3864 if (!In2.hasOneUse()) 3865 break; 3866 if (In2.getOpcode() != ISD::FP_EXTEND && 3867 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0)) 3868 break; 3869 In2 = In2.getOperand(0); 3870 if (In2.getOpcode() != ISD::FNEG) 3871 break; 3872 SDLoc DL(N); 3873 SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT); 3874 return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0), 3875 DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound)); 3876 } 3877 } 3878 3879 return SDValue(); 3880 } 3881 3882 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 3883 const SDNode *N, CombineLevel Level) const { 3884 // The following folds are only desirable if `(OP _, c1 << c2)` can be 3885 // materialised in fewer instructions than `(OP _, c1)`: 3886 // 3887 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 3888 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 3889 SDValue N0 = N->getOperand(0); 3890 EVT Ty = N0.getValueType(); 3891 if (Ty.isScalarInteger() && 3892 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 3893 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 3894 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3895 if (C1 && C2) { 3896 const APInt &C1Int = C1->getAPIntValue(); 3897 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 3898 3899 // We can materialise `c1 << c2` into an add immediate, so it's "free", 3900 // and the combine should happen, to potentially allow further combines 3901 // later. 3902 if (ShiftedC1Int.getMinSignedBits() <= 64 && 3903 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 3904 return true; 3905 3906 // We can materialise `c1` in an add immediate, so it's "free", and the 3907 // combine should be prevented. 3908 if (C1Int.getMinSignedBits() <= 64 && 3909 isLegalAddImmediate(C1Int.getSExtValue())) 3910 return false; 3911 3912 // Neither constant will fit into an immediate, so find materialisation 3913 // costs. 3914 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 3915 Subtarget.is64Bit()); 3916 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 3917 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit()); 3918 3919 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 3920 // combine should be prevented. 
3921 if (C1Cost < ShiftedC1Cost) 3922 return false; 3923 } 3924 } 3925 return true; 3926 } 3927 3928 bool RISCVTargetLowering::targetShrinkDemandedConstant( 3929 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3930 TargetLoweringOpt &TLO) const { 3931 // Delay this optimization as late as possible. 3932 if (!TLO.LegalOps) 3933 return false; 3934 3935 EVT VT = Op.getValueType(); 3936 if (VT.isVector()) 3937 return false; 3938 3939 // Only handle AND for now. 3940 if (Op.getOpcode() != ISD::AND) 3941 return false; 3942 3943 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 3944 if (!C) 3945 return false; 3946 3947 const APInt &Mask = C->getAPIntValue(); 3948 3949 // Clear all non-demanded bits initially. 3950 APInt ShrunkMask = Mask & DemandedBits; 3951 3952 // If the shrunk mask fits in sign extended 12 bits, let the target 3953 // independent code apply it. 3954 if (ShrunkMask.isSignedIntN(12)) 3955 return false; 3956 3957 // Try to make a smaller immediate by setting undemanded bits. 3958 3959 // We need to be able to make a negative number through a combination of mask 3960 // and undemanded bits. 3961 APInt ExpandedMask = Mask | ~DemandedBits; 3962 if (!ExpandedMask.isNegative()) 3963 return false; 3964 3965 // What is the fewest number of bits we need to represent the negative number. 3966 unsigned MinSignedBits = ExpandedMask.getMinSignedBits(); 3967 3968 // Try to make a 12 bit negative immediate. If that fails try to make a 32 3969 // bit negative immediate unless the shrunk immediate already fits in 32 bits. 3970 APInt NewMask = ShrunkMask; 3971 if (MinSignedBits <= 12) 3972 NewMask.setBitsFrom(11); 3973 else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32)) 3974 NewMask.setBitsFrom(31); 3975 else 3976 return false; 3977 3978 // Sanity check that our new mask is a subset of the demanded mask. 3979 assert(NewMask.isSubsetOf(ExpandedMask)); 3980 3981 // If we aren't changing the mask, just return true to keep it and prevent 3982 // the caller from optimizing. 3983 if (NewMask == Mask) 3984 return true; 3985 3986 // Replace the constant with the new mask. 3987 SDLoc DL(Op); 3988 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT); 3989 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); 3990 return TLO.CombineTo(Op, NewOp); 3991 } 3992 3993 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 3994 KnownBits &Known, 3995 const APInt &DemandedElts, 3996 const SelectionDAG &DAG, 3997 unsigned Depth) const { 3998 unsigned BitWidth = Known.getBitWidth(); 3999 unsigned Opc = Op.getOpcode(); 4000 assert((Opc >= ISD::BUILTIN_OP_END || 4001 Opc == ISD::INTRINSIC_WO_CHAIN || 4002 Opc == ISD::INTRINSIC_W_CHAIN || 4003 Opc == ISD::INTRINSIC_VOID) && 4004 "Should use MaskedValueIsZero if you don't know whether Op" 4005 " is a target node!"); 4006 4007 Known.resetAll(); 4008 switch (Opc) { 4009 default: break; 4010 case RISCVISD::REMUW: { 4011 KnownBits Known2; 4012 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 4013 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 4014 // We only care about the lower 32 bits. 4015 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); 4016 // Restore the original width by sign extending. 
4017 Known = Known.sext(BitWidth); 4018 break; 4019 } 4020 case RISCVISD::DIVUW: { 4021 KnownBits Known2; 4022 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 4023 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 4024 // We only care about the lower 32 bits. 4025 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); 4026 // Restore the original width by sign extending. 4027 Known = Known.sext(BitWidth); 4028 break; 4029 } 4030 case RISCVISD::READ_VLENB: 4031 // We assume VLENB is at least 8 bytes. 4032 // FIXME: The 1.0 draft spec defines minimum VLEN as 128 bits. 4033 Known.Zero.setLowBits(3); 4034 break; 4035 } 4036 } 4037 4038 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 4039 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4040 unsigned Depth) const { 4041 switch (Op.getOpcode()) { 4042 default: 4043 break; 4044 case RISCVISD::SLLW: 4045 case RISCVISD::SRAW: 4046 case RISCVISD::SRLW: 4047 case RISCVISD::DIVW: 4048 case RISCVISD::DIVUW: 4049 case RISCVISD::REMUW: 4050 case RISCVISD::ROLW: 4051 case RISCVISD::RORW: 4052 case RISCVISD::GREVIW: 4053 case RISCVISD::GORCIW: 4054 case RISCVISD::FSLW: 4055 case RISCVISD::FSRW: 4056 // TODO: As the result is sign-extended, this is conservatively correct. A 4057 // more precise answer could be calculated for SRAW depending on known 4058 // bits in the shift amount. 4059 return 33; 4060 case RISCVISD::SHFLI: { 4061 // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word 4062 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but 4063 // will stay within the upper 32 bits. If there were more than 32 sign bits 4064 // before there will be at least 33 sign bits after. 4065 if (Op.getValueType() == MVT::i64 && 4066 (Op.getConstantOperandVal(1) & 0x10) == 0) { 4067 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 4068 if (Tmp > 32) 4069 return 33; 4070 } 4071 break; 4072 } 4073 case RISCVISD::VMV_X_S: 4074 // The number of sign bits of the scalar result is computed by obtaining the 4075 // element type of the input vector operand, subtracting its width from the 4076 // XLEN, and then adding one (sign bit within the element type). If the 4077 // element type is wider than XLen, the least-significant XLEN bits are 4078 // taken. 4079 if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen()) 4080 return 1; 4081 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1; 4082 } 4083 4084 return 1; 4085 } 4086 4087 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 4088 MachineBasicBlock *BB) { 4089 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 4090 4091 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 4092 // Should the count have wrapped while it was being read, we need to try 4093 // again. 4094 // ... 4095 // read: 4096 // rdcycleh x3 # load high word of cycle 4097 // rdcycle x2 # load low word of cycle 4098 // rdcycleh x4 # load high word of cycle 4099 // bne x3, x4, read # check if high word reads match, otherwise try again 4100 // ... 
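  // For example, if the counter sits at 0x1FFFFFFFF and increments past
  // 0x200000000 between the two CSR reads, pairing the first high word (0x1)
  // with the freshly wrapped low word (0x0) would be off by 2^32; the second
  // high-word read (0x2) no longer matches the first, so the loop retries
  // and obtains a consistent pair.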
4101 4102 MachineFunction &MF = *BB->getParent(); 4103 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4104 MachineFunction::iterator It = ++BB->getIterator(); 4105 4106 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 4107 MF.insert(It, LoopMBB); 4108 4109 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 4110 MF.insert(It, DoneMBB); 4111 4112 // Transfer the remainder of BB and its successor edges to DoneMBB. 4113 DoneMBB->splice(DoneMBB->begin(), BB, 4114 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 4115 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 4116 4117 BB->addSuccessor(LoopMBB); 4118 4119 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4120 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 4121 Register LoReg = MI.getOperand(0).getReg(); 4122 Register HiReg = MI.getOperand(1).getReg(); 4123 DebugLoc DL = MI.getDebugLoc(); 4124 4125 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 4126 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 4127 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 4128 .addReg(RISCV::X0); 4129 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 4130 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 4131 .addReg(RISCV::X0); 4132 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 4133 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 4134 .addReg(RISCV::X0); 4135 4136 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 4137 .addReg(HiReg) 4138 .addReg(ReadAgainReg) 4139 .addMBB(LoopMBB); 4140 4141 LoopMBB->addSuccessor(LoopMBB); 4142 LoopMBB->addSuccessor(DoneMBB); 4143 4144 MI.eraseFromParent(); 4145 4146 return DoneMBB; 4147 } 4148 4149 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 4150 MachineBasicBlock *BB) { 4151 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 4152 4153 MachineFunction &MF = *BB->getParent(); 4154 DebugLoc DL = MI.getDebugLoc(); 4155 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4156 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 4157 Register LoReg = MI.getOperand(0).getReg(); 4158 Register HiReg = MI.getOperand(1).getReg(); 4159 Register SrcReg = MI.getOperand(2).getReg(); 4160 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 4161 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 4162 4163 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 4164 RI); 4165 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 4166 MachineMemOperand *MMOLo = 4167 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 4168 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 4169 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 4170 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 4171 .addFrameIndex(FI) 4172 .addImm(0) 4173 .addMemOperand(MMOLo); 4174 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 4175 .addFrameIndex(FI) 4176 .addImm(4) 4177 .addMemOperand(MMOHi); 4178 MI.eraseFromParent(); // The pseudo instruction is gone now. 
4179 return BB; 4180 } 4181 4182 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 4183 MachineBasicBlock *BB) { 4184 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 4185 "Unexpected instruction"); 4186 4187 MachineFunction &MF = *BB->getParent(); 4188 DebugLoc DL = MI.getDebugLoc(); 4189 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4190 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 4191 Register DstReg = MI.getOperand(0).getReg(); 4192 Register LoReg = MI.getOperand(1).getReg(); 4193 Register HiReg = MI.getOperand(2).getReg(); 4194 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 4195 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 4196 4197 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 4198 MachineMemOperand *MMOLo = 4199 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 4200 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 4201 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 4202 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 4203 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 4204 .addFrameIndex(FI) 4205 .addImm(0) 4206 .addMemOperand(MMOLo); 4207 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 4208 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 4209 .addFrameIndex(FI) 4210 .addImm(4) 4211 .addMemOperand(MMOHi); 4212 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 4213 MI.eraseFromParent(); // The pseudo instruction is gone now. 4214 return BB; 4215 } 4216 4217 static bool isSelectPseudo(MachineInstr &MI) { 4218 switch (MI.getOpcode()) { 4219 default: 4220 return false; 4221 case RISCV::Select_GPR_Using_CC_GPR: 4222 case RISCV::Select_FPR16_Using_CC_GPR: 4223 case RISCV::Select_FPR32_Using_CC_GPR: 4224 case RISCV::Select_FPR64_Using_CC_GPR: 4225 return true; 4226 } 4227 } 4228 4229 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 4230 MachineBasicBlock *BB) { 4231 // To "insert" Select_* instructions, we actually have to insert the triangle 4232 // control-flow pattern. The incoming instructions know the destination vreg 4233 // to set, the condition code register to branch on, the true/false values to 4234 // select between, and the condcode to use to select the appropriate branch. 4235 // 4236 // We produce the following control flow: 4237 // HeadMBB 4238 // | \ 4239 // | IfFalseMBB 4240 // | / 4241 // TailMBB 4242 // 4243 // When we find a sequence of selects we attempt to optimize their emission 4244 // by sharing the control flow. Currently we only handle cases where we have 4245 // multiple selects with the exact same condition (same LHS, RHS and CC). 4246 // The selects may be interleaved with other instructions if the other 4247 // instructions meet some requirements we deem safe: 4248 // - They are debug instructions. Otherwise, 4249 // - They do not have side-effects, do not access memory and their inputs do 4250 // not depend on the results of the select pseudo-instructions. 4251 // The TrueV/FalseV operands of the selects cannot depend on the result of 4252 // previous selects in the sequence. 4253 // These conditions could be further relaxed. See the X86 target for a 4254 // related approach and more information. 
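  // For example, a run of pseudos such as (operands shown loosely)
  //   %a = Select_GPR_Using_CC_GPR %x, %y, cc, %t0, %f0
  //   %b = Select_GPR_Using_CC_GPR %x, %y, cc, %t1, %f1
  // shares a single conditional branch, with both %a and %b becoming PHIs in
  // TailMBB instead of each select expanding into its own triangle.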
4255 Register LHS = MI.getOperand(1).getReg(); 4256 Register RHS = MI.getOperand(2).getReg(); 4257 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm()); 4258 4259 SmallVector<MachineInstr *, 4> SelectDebugValues; 4260 SmallSet<Register, 4> SelectDests; 4261 SelectDests.insert(MI.getOperand(0).getReg()); 4262 4263 MachineInstr *LastSelectPseudo = &MI; 4264 4265 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 4266 SequenceMBBI != E; ++SequenceMBBI) { 4267 if (SequenceMBBI->isDebugInstr()) 4268 continue; 4269 else if (isSelectPseudo(*SequenceMBBI)) { 4270 if (SequenceMBBI->getOperand(1).getReg() != LHS || 4271 SequenceMBBI->getOperand(2).getReg() != RHS || 4272 SequenceMBBI->getOperand(3).getImm() != CC || 4273 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 4274 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 4275 break; 4276 LastSelectPseudo = &*SequenceMBBI; 4277 SequenceMBBI->collectDebugValues(SelectDebugValues); 4278 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 4279 } else { 4280 if (SequenceMBBI->hasUnmodeledSideEffects() || 4281 SequenceMBBI->mayLoadOrStore()) 4282 break; 4283 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 4284 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 4285 })) 4286 break; 4287 } 4288 } 4289 4290 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 4291 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 4292 DebugLoc DL = MI.getDebugLoc(); 4293 MachineFunction::iterator I = ++BB->getIterator(); 4294 4295 MachineBasicBlock *HeadMBB = BB; 4296 MachineFunction *F = BB->getParent(); 4297 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 4298 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 4299 4300 F->insert(I, IfFalseMBB); 4301 F->insert(I, TailMBB); 4302 4303 // Transfer debug instructions associated with the selects to TailMBB. 4304 for (MachineInstr *DebugInstr : SelectDebugValues) { 4305 TailMBB->push_back(DebugInstr->removeFromParent()); 4306 } 4307 4308 // Move all instructions after the sequence to TailMBB. 4309 TailMBB->splice(TailMBB->end(), HeadMBB, 4310 std::next(LastSelectPseudo->getIterator()), HeadMBB->end()); 4311 // Update machine-CFG edges by transferring all successors of the current 4312 // block to the new block which will contain the Phi nodes for the selects. 4313 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB); 4314 // Set the successors for HeadMBB. 4315 HeadMBB->addSuccessor(IfFalseMBB); 4316 HeadMBB->addSuccessor(TailMBB); 4317 4318 // Insert appropriate branch. 4319 unsigned Opcode = getBranchOpcodeForIntCondCode(CC); 4320 4321 BuildMI(HeadMBB, DL, TII.get(Opcode)) 4322 .addReg(LHS) 4323 .addReg(RHS) 4324 .addMBB(TailMBB); 4325 4326 // IfFalseMBB just falls through to TailMBB. 4327 IfFalseMBB->addSuccessor(TailMBB); 4328 4329 // Create PHIs for all of the select pseudo-instructions. 
4330 auto SelectMBBI = MI.getIterator(); 4331 auto SelectEnd = std::next(LastSelectPseudo->getIterator()); 4332 auto InsertionPoint = TailMBB->begin(); 4333 while (SelectMBBI != SelectEnd) { 4334 auto Next = std::next(SelectMBBI); 4335 if (isSelectPseudo(*SelectMBBI)) { 4336 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ] 4337 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(), 4338 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg()) 4339 .addReg(SelectMBBI->getOperand(4).getReg()) 4340 .addMBB(HeadMBB) 4341 .addReg(SelectMBBI->getOperand(5).getReg()) 4342 .addMBB(IfFalseMBB); 4343 SelectMBBI->eraseFromParent(); 4344 } 4345 SelectMBBI = Next; 4346 } 4347 4348 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs); 4349 return TailMBB; 4350 } 4351 4352 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB, 4353 int VLIndex, unsigned SEWIndex, 4354 RISCVVLMUL VLMul, bool ForceTailAgnostic) { 4355 MachineFunction &MF = *BB->getParent(); 4356 DebugLoc DL = MI.getDebugLoc(); 4357 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 4358 4359 unsigned SEW = MI.getOperand(SEWIndex).getImm(); 4360 assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW"); 4361 RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8)); 4362 4363 MachineRegisterInfo &MRI = MF.getRegInfo(); 4364 4365 auto BuildVSETVLI = [&]() { 4366 if (VLIndex >= 0) { 4367 Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass); 4368 Register VLReg = MI.getOperand(VLIndex).getReg(); 4369 4370 // VL might be a compile time constant, but isel would have to put it 4371 // in a register. See if VL comes from an ADDI X0, imm. 4372 if (VLReg.isVirtual()) { 4373 MachineInstr *Def = MRI.getVRegDef(VLReg); 4374 if (Def && Def->getOpcode() == RISCV::ADDI && 4375 Def->getOperand(1).getReg() == RISCV::X0 && 4376 Def->getOperand(2).isImm()) { 4377 uint64_t Imm = Def->getOperand(2).getImm(); 4378 // VSETIVLI allows a 5-bit zero extended immediate. 4379 if (isUInt<5>(Imm)) 4380 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI)) 4381 .addReg(DestReg, RegState::Define | RegState::Dead) 4382 .addImm(Imm); 4383 } 4384 } 4385 4386 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 4387 .addReg(DestReg, RegState::Define | RegState::Dead) 4388 .addReg(VLReg); 4389 } 4390 4391 // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0). 4392 return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI)) 4393 .addReg(RISCV::X0, RegState::Define | RegState::Dead) 4394 .addReg(RISCV::X0, RegState::Kill); 4395 }; 4396 4397 MachineInstrBuilder MIB = BuildVSETVLI(); 4398 4399 // Default to tail agnostic unless the destination is tied to a source. In 4400 // that case the user would have some control over the tail values. The tail 4401 // policy is also ignored on instructions that only update element 0 like 4402 // vmv.s.x or reductions so use agnostic there to match the common case. 4403 // FIXME: This is conservatively correct, but we might want to detect that 4404 // the input is undefined. 4405 bool TailAgnostic = true; 4406 unsigned UseOpIdx; 4407 if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) { 4408 TailAgnostic = false; 4409 // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic. 
4410 const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
4411 MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
4412 if (UseMI && UseMI->isImplicitDef())
4413 TailAgnostic = true;
4414 }
4415
4416 // For simplicity we reuse the vtype representation here.
4417 MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
4418 /*TailAgnostic*/ TailAgnostic,
4419 /*MaskAgnostic*/ false));
4420
4421 // Remove (now) redundant operands from pseudo
4422 MI.getOperand(SEWIndex).setImm(-1);
4423 if (VLIndex >= 0) {
4424 MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
4425 MI.getOperand(VLIndex).setIsKill(false);
4426 }
4427
4428 return BB;
4429 }
4430
4431 MachineBasicBlock *
4432 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
4433 MachineBasicBlock *BB) const {
4434 uint64_t TSFlags = MI.getDesc().TSFlags;
4435
4436 if (TSFlags & RISCVII::HasSEWOpMask) {
4437 unsigned NumOperands = MI.getNumExplicitOperands();
4438 int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
4439 unsigned SEWIndex = NumOperands - 1;
4440 bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
4441
4442 RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
4443 RISCVII::VLMulShift);
4444 return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
4445 }
4446
4447 switch (MI.getOpcode()) {
4448 default:
4449 llvm_unreachable("Unexpected instr type to insert");
4450 case RISCV::ReadCycleWide:
4451 assert(!Subtarget.is64Bit() &&
4452 "ReadCycleWide is only to be used on riscv32");
4453 return emitReadCycleWidePseudo(MI, BB);
4454 case RISCV::Select_GPR_Using_CC_GPR:
4455 case RISCV::Select_FPR16_Using_CC_GPR:
4456 case RISCV::Select_FPR32_Using_CC_GPR:
4457 case RISCV::Select_FPR64_Using_CC_GPR:
4458 return emitSelectPseudo(MI, BB);
4459 case RISCV::BuildPairF64Pseudo:
4460 return emitBuildPairF64Pseudo(MI, BB);
4461 case RISCV::SplitF64Pseudo:
4462 return emitSplitF64Pseudo(MI, BB);
4463 }
4464 }
4465
4466 // Calling Convention Implementation.
4467 // The expectations for frontend ABI lowering vary from target to target.
4468 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
4469 // details, but this is a longer term goal. For now, we simply try to keep the
4470 // role of the frontend as simple and well-defined as possible. The rules can
4471 // be summarised as:
4472 // * Never split up large scalar arguments. We handle them here.
4473 // * If a hardfloat calling convention is being used, and the struct may be
4474 // passed in a pair of registers (fp+fp, int+fp), and both registers are
4475 // available, then pass as two separate arguments. If either the GPRs or FPRs
4476 // are exhausted, then pass according to the rule below.
4477 // * If a struct could never be passed in registers or directly in a stack
4478 // slot (as it is larger than 2*XLEN and the floating point rules don't
4479 // apply), then pass it using a pointer with the byval attribute.
4480 // * If a struct is less than 2*XLEN, then coerce to either a two-element
4481 // word-sized array or a 2*XLEN scalar (depending on alignment).
4482 // * The frontend can determine whether a struct is returned by reference or
4483 // not based on its size and fields. If it will be returned by reference, the
4484 // frontend must modify the prototype so a pointer with the sret annotation is
4485 // passed as the first argument. This is not necessary for large scalar
4486 // returns.
4487 // * Struct return values and varargs should be coerced to structs containing 4488 // register-size fields in the same situations they would be for fixed 4489 // arguments. 4490 4491 static const MCPhysReg ArgGPRs[] = { 4492 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 4493 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 4494 }; 4495 static const MCPhysReg ArgFPR16s[] = { 4496 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 4497 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 4498 }; 4499 static const MCPhysReg ArgFPR32s[] = { 4500 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 4501 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 4502 }; 4503 static const MCPhysReg ArgFPR64s[] = { 4504 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 4505 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 4506 }; 4507 // This is an interim calling convention and it may be changed in the future. 4508 static const MCPhysReg ArgVRs[] = { 4509 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, 4510 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, 4511 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; 4512 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, 4513 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, 4514 RISCV::V20M2, RISCV::V22M2}; 4515 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, 4516 RISCV::V20M4}; 4517 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; 4518 4519 // Pass a 2*XLEN argument that has been split into two XLEN values through 4520 // registers or the stack as necessary. 4521 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 4522 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 4523 MVT ValVT2, MVT LocVT2, 4524 ISD::ArgFlagsTy ArgFlags2) { 4525 unsigned XLenInBytes = XLen / 8; 4526 if (Register Reg = State.AllocateReg(ArgGPRs)) { 4527 // At least one half can be passed via register. 4528 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 4529 VA1.getLocVT(), CCValAssign::Full)); 4530 } else { 4531 // Both halves must be passed on the stack, with proper alignment. 4532 Align StackAlign = 4533 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 4534 State.addLoc( 4535 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 4536 State.AllocateStack(XLenInBytes, StackAlign), 4537 VA1.getLocVT(), CCValAssign::Full)); 4538 State.addLoc(CCValAssign::getMem( 4539 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 4540 LocVT2, CCValAssign::Full)); 4541 return false; 4542 } 4543 4544 if (Register Reg = State.AllocateReg(ArgGPRs)) { 4545 // The second half can also be passed via register. 4546 State.addLoc( 4547 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 4548 } else { 4549 // The second half is passed via the stack, without additional alignment. 4550 State.addLoc(CCValAssign::getMem( 4551 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 4552 LocVT2, CCValAssign::Full)); 4553 } 4554 4555 return false; 4556 } 4557 4558 // Implements the RISC-V calling convention. Returns true upon failure. 
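// As an informal illustration of the rules implemented below (register names
// are only examples): a variadic 2*XLEN-sized, 2*XLEN-aligned argument is
// steered to an 'even' register pair such as a2/a3, while an argument that
// legalisation split into more than two XLEN pieces (e.g. i128 on RV32) is
// passed indirectly through a pointer.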
4559 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo, 4560 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, 4561 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed, 4562 bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI, 4563 Optional<unsigned> FirstMaskArgument) { 4564 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits(); 4565 assert(XLen == 32 || XLen == 64); 4566 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64; 4567 4568 // Any return value split in to more than two values can't be returned 4569 // directly. 4570 if (IsRet && ValNo > 1) 4571 return true; 4572 4573 // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a 4574 // variadic argument, or if no F16/F32 argument registers are available. 4575 bool UseGPRForF16_F32 = true; 4576 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a 4577 // variadic argument, or if no F64 argument registers are available. 4578 bool UseGPRForF64 = true; 4579 4580 switch (ABI) { 4581 default: 4582 llvm_unreachable("Unexpected ABI"); 4583 case RISCVABI::ABI_ILP32: 4584 case RISCVABI::ABI_LP64: 4585 break; 4586 case RISCVABI::ABI_ILP32F: 4587 case RISCVABI::ABI_LP64F: 4588 UseGPRForF16_F32 = !IsFixed; 4589 break; 4590 case RISCVABI::ABI_ILP32D: 4591 case RISCVABI::ABI_LP64D: 4592 UseGPRForF16_F32 = !IsFixed; 4593 UseGPRForF64 = !IsFixed; 4594 break; 4595 } 4596 4597 // FPR16, FPR32, and FPR64 alias each other. 4598 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) { 4599 UseGPRForF16_F32 = true; 4600 UseGPRForF64 = true; 4601 } 4602 4603 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and 4604 // similar local variables rather than directly checking against the target 4605 // ABI. 4606 4607 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) { 4608 LocVT = XLenVT; 4609 LocInfo = CCValAssign::BCvt; 4610 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) { 4611 LocVT = MVT::i64; 4612 LocInfo = CCValAssign::BCvt; 4613 } 4614 4615 // If this is a variadic argument, the RISC-V calling convention requires 4616 // that it is assigned an 'even' or 'aligned' register if it has 8-byte 4617 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should 4618 // be used regardless of whether the original argument was split during 4619 // legalisation or not. The argument will not be passed by registers if the 4620 // original type is larger than 2*XLEN, so the register alignment rule does 4621 // not apply. 4622 unsigned TwoXLenInBytes = (2 * XLen) / 8; 4623 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes && 4624 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) { 4625 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs); 4626 // Skip 'odd' register if necessary. 4627 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1) 4628 State.AllocateReg(ArgGPRs); 4629 } 4630 4631 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs(); 4632 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags = 4633 State.getPendingArgFlags(); 4634 4635 assert(PendingLocs.size() == PendingArgFlags.size() && 4636 "PendingLocs and PendingArgFlags out of sync"); 4637 4638 // Handle passing f64 on RV32D with a soft float ABI or when floating point 4639 // registers are exhausted. 
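// For instance (illustrative only): if two argument GPRs remain, the two
// halves of the f64 travel in that register pair; if only a7 is left, the low
// half goes in a7 and the high half on the stack; once no GPRs remain, the
// whole value takes an 8-byte aligned stack slot.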
4640 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 4641 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 4642 "Can't lower f64 if it is split"); 4643 // Depending on available argument GPRS, f64 may be passed in a pair of 4644 // GPRs, split between a GPR and the stack, or passed completely on the 4645 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 4646 // cases. 4647 Register Reg = State.AllocateReg(ArgGPRs); 4648 LocVT = MVT::i32; 4649 if (!Reg) { 4650 unsigned StackOffset = State.AllocateStack(8, Align(8)); 4651 State.addLoc( 4652 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 4653 return false; 4654 } 4655 if (!State.AllocateReg(ArgGPRs)) 4656 State.AllocateStack(4, Align(4)); 4657 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 4658 return false; 4659 } 4660 4661 // Split arguments might be passed indirectly, so keep track of the pending 4662 // values. 4663 if (ArgFlags.isSplit() || !PendingLocs.empty()) { 4664 LocVT = XLenVT; 4665 LocInfo = CCValAssign::Indirect; 4666 PendingLocs.push_back( 4667 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 4668 PendingArgFlags.push_back(ArgFlags); 4669 if (!ArgFlags.isSplitEnd()) { 4670 return false; 4671 } 4672 } 4673 4674 // If the split argument only had two elements, it should be passed directly 4675 // in registers or on the stack. 4676 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) { 4677 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 4678 // Apply the normal calling convention rules to the first half of the 4679 // split argument. 4680 CCValAssign VA = PendingLocs[0]; 4681 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 4682 PendingLocs.clear(); 4683 PendingArgFlags.clear(); 4684 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 4685 ArgFlags); 4686 } 4687 4688 // Allocate to a register if possible, or else a stack slot. 4689 Register Reg; 4690 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 4691 Reg = State.AllocateReg(ArgFPR16s); 4692 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 4693 Reg = State.AllocateReg(ArgFPR32s); 4694 else if (ValVT == MVT::f64 && !UseGPRForF64) 4695 Reg = State.AllocateReg(ArgFPR64s); 4696 else if (ValVT.isScalableVector()) { 4697 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); 4698 if (RC == &RISCV::VRRegClass) { 4699 // Assign the first mask argument to V0. 4700 // This is an interim calling convention and it may be changed in the 4701 // future. 4702 if (FirstMaskArgument.hasValue() && 4703 ValNo == FirstMaskArgument.getValue()) { 4704 Reg = State.AllocateReg(RISCV::V0); 4705 } else { 4706 Reg = State.AllocateReg(ArgVRs); 4707 } 4708 } else if (RC == &RISCV::VRM2RegClass) { 4709 Reg = State.AllocateReg(ArgVRM2s); 4710 } else if (RC == &RISCV::VRM4RegClass) { 4711 Reg = State.AllocateReg(ArgVRM4s); 4712 } else if (RC == &RISCV::VRM8RegClass) { 4713 Reg = State.AllocateReg(ArgVRM8s); 4714 } else { 4715 llvm_unreachable("Unhandled class register for ValueType"); 4716 } 4717 if (!Reg) { 4718 LocInfo = CCValAssign::Indirect; 4719 // Try using a GPR to pass the address 4720 Reg = State.AllocateReg(ArgGPRs); 4721 LocVT = XLenVT; 4722 } 4723 } else 4724 Reg = State.AllocateReg(ArgGPRs); 4725 unsigned StackOffset = 4726 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8)); 4727 4728 // If we reach this point and PendingLocs is non-empty, we must be at the 4729 // end of a split argument that must be passed indirectly. 
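// (For example, an i128 on RV32 arrives here as four pending i32 pieces; each
// of them is rewritten below to the single GPR or stack slot that carries the
// pointer to the in-memory copy.)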
4730 if (!PendingLocs.empty()) { 4731 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 4732 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 4733 4734 for (auto &It : PendingLocs) { 4735 if (Reg) 4736 It.convertToReg(Reg); 4737 else 4738 It.convertToMem(StackOffset); 4739 State.addLoc(It); 4740 } 4741 PendingLocs.clear(); 4742 PendingArgFlags.clear(); 4743 return false; 4744 } 4745 4746 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || 4747 (TLI.getSubtarget().hasStdExtV() && ValVT.isScalableVector())) && 4748 "Expected an XLenVT or scalable vector types at this stage"); 4749 4750 if (Reg) { 4751 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 4752 return false; 4753 } 4754 4755 // When a floating-point value is passed on the stack, no bit-conversion is 4756 // needed. 4757 if (ValVT.isFloatingPoint()) { 4758 LocVT = ValVT; 4759 LocInfo = CCValAssign::Full; 4760 } 4761 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 4762 return false; 4763 } 4764 4765 template <typename ArgTy> 4766 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 4767 for (const auto &ArgIdx : enumerate(Args)) { 4768 MVT ArgVT = ArgIdx.value().VT; 4769 if (ArgVT.isScalableVector() && 4770 ArgVT.getVectorElementType().SimpleTy == MVT::i1) 4771 return ArgIdx.index(); 4772 } 4773 return None; 4774 } 4775 4776 void RISCVTargetLowering::analyzeInputArgs( 4777 MachineFunction &MF, CCState &CCInfo, 4778 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const { 4779 unsigned NumArgs = Ins.size(); 4780 FunctionType *FType = MF.getFunction().getFunctionType(); 4781 4782 Optional<unsigned> FirstMaskArgument; 4783 if (Subtarget.hasStdExtV()) 4784 FirstMaskArgument = preAssignMask(Ins); 4785 4786 for (unsigned i = 0; i != NumArgs; ++i) { 4787 MVT ArgVT = Ins[i].VT; 4788 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 4789 4790 Type *ArgTy = nullptr; 4791 if (IsRet) 4792 ArgTy = FType->getReturnType(); 4793 else if (Ins[i].isOrigArg()) 4794 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 4795 4796 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 4797 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 4798 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 4799 FirstMaskArgument)) { 4800 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 4801 << EVT(ArgVT).getEVTString() << '\n'); 4802 llvm_unreachable(nullptr); 4803 } 4804 } 4805 } 4806 4807 void RISCVTargetLowering::analyzeOutputArgs( 4808 MachineFunction &MF, CCState &CCInfo, 4809 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 4810 CallLoweringInfo *CLI) const { 4811 unsigned NumArgs = Outs.size(); 4812 4813 Optional<unsigned> FirstMaskArgument; 4814 if (Subtarget.hasStdExtV()) 4815 FirstMaskArgument = preAssignMask(Outs); 4816 4817 for (unsigned i = 0; i != NumArgs; i++) { 4818 MVT ArgVT = Outs[i].VT; 4819 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 4820 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 4821 4822 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 4823 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 4824 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 4825 FirstMaskArgument)) { 4826 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 4827 << EVT(ArgVT).getEVTString() << "\n"); 4828 llvm_unreachable(nullptr); 4829 } 4830 } 4831 } 4832 4833 // Convert Val to a ValVT. 
Should not be called for CCValAssign::Indirect 4834 // values. 4835 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 4836 const CCValAssign &VA, const SDLoc &DL) { 4837 switch (VA.getLocInfo()) { 4838 default: 4839 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 4840 case CCValAssign::Full: 4841 break; 4842 case CCValAssign::BCvt: 4843 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 4844 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 4845 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 4846 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 4847 else 4848 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 4849 break; 4850 } 4851 return Val; 4852 } 4853 4854 // The caller is responsible for loading the full value if the argument is 4855 // passed with CCValAssign::Indirect. 4856 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 4857 const CCValAssign &VA, const SDLoc &DL, 4858 const RISCVTargetLowering &TLI) { 4859 MachineFunction &MF = DAG.getMachineFunction(); 4860 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 4861 EVT LocVT = VA.getLocVT(); 4862 SDValue Val; 4863 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 4864 Register VReg = RegInfo.createVirtualRegister(RC); 4865 RegInfo.addLiveIn(VA.getLocReg(), VReg); 4866 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 4867 4868 if (VA.getLocInfo() == CCValAssign::Indirect) 4869 return Val; 4870 4871 return convertLocVTToValVT(DAG, Val, VA, DL); 4872 } 4873 4874 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 4875 const CCValAssign &VA, const SDLoc &DL) { 4876 EVT LocVT = VA.getLocVT(); 4877 4878 switch (VA.getLocInfo()) { 4879 default: 4880 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 4881 case CCValAssign::Full: 4882 break; 4883 case CCValAssign::BCvt: 4884 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 4885 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 4886 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 4887 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 4888 else 4889 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 4890 break; 4891 } 4892 return Val; 4893 } 4894 4895 // The caller is responsible for loading the full value if the argument is 4896 // passed with CCValAssign::Indirect. 
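// (For an indirect argument this means unpackFromMemLoc below only loads the
// XLEN-sized pointer from its stack slot; the per-part loads happen in the
// caller, e.g. LowerFormalArguments.)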
4897 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
4898 const CCValAssign &VA, const SDLoc &DL) {
4899 MachineFunction &MF = DAG.getMachineFunction();
4900 MachineFrameInfo &MFI = MF.getFrameInfo();
4901 EVT LocVT = VA.getLocVT();
4902 EVT ValVT = VA.getValVT();
4903 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
4904 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
4905 VA.getLocMemOffset(), /*Immutable=*/true);
4906 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4907 SDValue Val;
4908
4909 ISD::LoadExtType ExtType;
4910 switch (VA.getLocInfo()) {
4911 default:
4912 llvm_unreachable("Unexpected CCValAssign::LocInfo");
4913 case CCValAssign::Full:
4914 case CCValAssign::Indirect:
4915 case CCValAssign::BCvt:
4916 ExtType = ISD::NON_EXTLOAD;
4917 break;
4918 }
4919 Val = DAG.getExtLoad(
4920 ExtType, DL, LocVT, Chain, FIN,
4921 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
4922 return Val;
4923 }
4924
4925 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
4926 const CCValAssign &VA, const SDLoc &DL) {
4927 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
4928 "Unexpected VA");
4929 MachineFunction &MF = DAG.getMachineFunction();
4930 MachineFrameInfo &MFI = MF.getFrameInfo();
4931 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4932
4933 if (VA.isMemLoc()) {
4934 // f64 is passed on the stack.
4935 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
4936 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
4937 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
4938 MachinePointerInfo::getFixedStack(MF, FI));
4939 }
4940
4941 assert(VA.isRegLoc() && "Expected register VA assignment");
4942
4943 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4944 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
4945 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
4946 SDValue Hi;
4947 if (VA.getLocReg() == RISCV::X17) {
4948 // Second half of f64 is passed on the stack.
4949 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
4950 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
4951 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
4952 MachinePointerInfo::getFixedStack(MF, FI));
4953 } else {
4954 // Second half of f64 is passed in another GPR.
4955 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
4956 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
4957 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
4958 }
4959 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
4960 }
4961
4962 // FastCC has less than 1% performance improvement for some particular
4963 // benchmarks. But theoretically, it may have benefits in some cases.
4964 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
4965 CCValAssign::LocInfo LocInfo,
4966 ISD::ArgFlagsTy ArgFlags, CCState &State) {
4967
4968 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
4969 // X5 and X6 might be used for the save-restore libcalls.
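// (In other words, t0/t1 are deliberately left out of the list below; the
// usual a0-a7 come first, followed by the remaining temporaries t2-t6.)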
4970 static const MCPhysReg GPRList[] = { 4971 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, 4972 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, 4973 RISCV::X29, RISCV::X30, RISCV::X31}; 4974 if (unsigned Reg = State.AllocateReg(GPRList)) { 4975 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 4976 return false; 4977 } 4978 } 4979 4980 if (LocVT == MVT::f16) { 4981 static const MCPhysReg FPR16List[] = { 4982 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, 4983 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, 4984 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, 4985 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; 4986 if (unsigned Reg = State.AllocateReg(FPR16List)) { 4987 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 4988 return false; 4989 } 4990 } 4991 4992 if (LocVT == MVT::f32) { 4993 static const MCPhysReg FPR32List[] = { 4994 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, 4995 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, 4996 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, 4997 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; 4998 if (unsigned Reg = State.AllocateReg(FPR32List)) { 4999 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5000 return false; 5001 } 5002 } 5003 5004 if (LocVT == MVT::f64) { 5005 static const MCPhysReg FPR64List[] = { 5006 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, 5007 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, 5008 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, 5009 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; 5010 if (unsigned Reg = State.AllocateReg(FPR64List)) { 5011 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5012 return false; 5013 } 5014 } 5015 5016 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 5017 unsigned Offset4 = State.AllocateStack(4, Align(4)); 5018 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 5019 return false; 5020 } 5021 5022 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 5023 unsigned Offset5 = State.AllocateStack(8, Align(8)); 5024 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 5025 return false; 5026 } 5027 5028 return true; // CC didn't match. 5029 } 5030 5031 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 5032 CCValAssign::LocInfo LocInfo, 5033 ISD::ArgFlagsTy ArgFlags, CCState &State) { 5034 5035 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 5036 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 5037 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 5038 static const MCPhysReg GPRList[] = { 5039 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 5040 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 5041 if (unsigned Reg = State.AllocateReg(GPRList)) { 5042 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 5043 return false; 5044 } 5045 } 5046 5047 if (LocVT == MVT::f32) { 5048 // Pass in STG registers: F1, ..., F6 5049 // fs0 ... 
fs5
5050 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
5051 RISCV::F18_F, RISCV::F19_F,
5052 RISCV::F20_F, RISCV::F21_F};
5053 if (unsigned Reg = State.AllocateReg(FPR32List)) {
5054 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5055 return false;
5056 }
5057 }
5058
5059 if (LocVT == MVT::f64) {
5060 // Pass in STG registers: D1, ..., D6
5061 // fs6 ... fs11
5062 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
5063 RISCV::F24_D, RISCV::F25_D,
5064 RISCV::F26_D, RISCV::F27_D};
5065 if (unsigned Reg = State.AllocateReg(FPR64List)) {
5066 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
5067 return false;
5068 }
5069 }
5070
5071 report_fatal_error("No registers left in GHC calling convention");
5072 return true;
5073 }
5074
5075 // Transform physical registers into virtual registers.
5076 SDValue RISCVTargetLowering::LowerFormalArguments(
5077 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
5078 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
5079 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5080
5081 MachineFunction &MF = DAG.getMachineFunction();
5082
5083 switch (CallConv) {
5084 default:
5085 report_fatal_error("Unsupported calling convention");
5086 case CallingConv::C:
5087 case CallingConv::Fast:
5088 break;
5089 case CallingConv::GHC:
5090 if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
5091 !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
5092 report_fatal_error(
5093 "GHC calling convention requires the F and D instruction set extensions");
5094 }
5095
5096 const Function &Func = MF.getFunction();
5097 if (Func.hasFnAttribute("interrupt")) {
5098 if (!Func.arg_empty())
5099 report_fatal_error(
5100 "Functions with the interrupt attribute cannot have arguments!");
5101
5102 StringRef Kind =
5103 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
5104
5105 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
5106 report_fatal_error(
5107 "Function interrupt attribute argument not supported!");
5108 }
5109
5110 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5111 MVT XLenVT = Subtarget.getXLenVT();
5112 unsigned XLenInBytes = Subtarget.getXLen() / 8;
5113 // Used with varargs to accumulate store chains.
5114 std::vector<SDValue> OutChains;
5115
5116 // Assign locations to all of the incoming arguments.
5117 SmallVector<CCValAssign, 16> ArgLocs;
5118 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5119
5120 if (CallConv == CallingConv::Fast)
5121 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
5122 else if (CallConv == CallingConv::GHC)
5123 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
5124 else
5125 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
5126
5127 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5128 CCValAssign &VA = ArgLocs[i];
5129 SDValue ArgValue;
5130 // Passing f64 on RV32D with a soft float ABI must be handled as a special
5131 // case.
5132 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
5133 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
5134 else if (VA.isRegLoc())
5135 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
5136 else
5137 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
5138
5139 if (VA.getLocInfo() == CCValAssign::Indirect) {
5140 // If the original argument was split and passed by reference (e.g. i128
5141 // on RV32), we need to load all parts of it here (using the same
5142 // address).
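// (The first load below reads the piece at offset 0; the loop that follows
// then walks any remaining parts of the same original argument at their
// recorded PartOffsets.)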
5143 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
5144 MachinePointerInfo()));
5145 unsigned ArgIndex = Ins[i].OrigArgIndex;
5146 assert(Ins[i].PartOffset == 0);
5147 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
5148 CCValAssign &PartVA = ArgLocs[i + 1];
5149 unsigned PartOffset = Ins[i + 1].PartOffset;
5150 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
5151 DAG.getIntPtrConstant(PartOffset, DL));
5152 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
5153 MachinePointerInfo()));
5154 ++i;
5155 }
5156 continue;
5157 }
5158 InVals.push_back(ArgValue);
5159 }
5160
5161 if (IsVarArg) {
5162 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
5163 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
5164 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
5165 MachineFrameInfo &MFI = MF.getFrameInfo();
5166 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5167 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
5168
5169 // Offset of the first variable argument from stack pointer, and size of
5170 // the vararg save area. For now, the varargs save area is either zero or
5171 // large enough to hold a0-a7.
5172 int VaArgOffset, VarArgsSaveSize;
5173
5174 // If all registers are allocated, then all varargs must be passed on the
5175 // stack and we don't need to save any argregs.
5176 if (ArgRegs.size() == Idx) {
5177 VaArgOffset = CCInfo.getNextStackOffset();
5178 VarArgsSaveSize = 0;
5179 } else {
5180 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
5181 VaArgOffset = -VarArgsSaveSize;
5182 }
5183
5184 // Record the frame index of the first variable argument
5185 // which is a value necessary to VASTART.
5186 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5187 RVFI->setVarArgsFrameIndex(FI);
5188
5189 // If saving an odd number of registers then create an extra stack slot to
5190 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
5191 // offsets to even-numbered registers remain 2*XLEN-aligned.
5192 if (Idx % 2) {
5193 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
5194 VarArgsSaveSize += XLenInBytes;
5195 }
5196
5197 // Copy the integer registers that may have been used for passing varargs
5198 // to the vararg save area.
5199 for (unsigned I = Idx; I < ArgRegs.size();
5200 ++I, VaArgOffset += XLenInBytes) {
5201 const Register Reg = RegInfo.createVirtualRegister(RC);
5202 RegInfo.addLiveIn(ArgRegs[I], Reg);
5203 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
5204 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
5205 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
5206 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
5207 MachinePointerInfo::getFixedStack(MF, FI));
5208 cast<StoreSDNode>(Store.getNode())
5209 ->getMemOperand()
5210 ->setValue((Value *)nullptr);
5211 OutChains.push_back(Store);
5212 }
5213 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
5214 }
5215
5216 // All stores are grouped in one node to allow the matching between
5217 // the size of Ins and InVals. This only happens for vararg functions.
5218 if (!OutChains.empty()) {
5219 OutChains.push_back(Chain);
5220 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
5221 }
5222
5223 return Chain;
5224 }
5225
5226 /// isEligibleForTailCallOptimization - Check whether the call is eligible
5227 /// for tail call optimization.
5228 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization. 5229 bool RISCVTargetLowering::isEligibleForTailCallOptimization( 5230 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF, 5231 const SmallVector<CCValAssign, 16> &ArgLocs) const { 5232 5233 auto &Callee = CLI.Callee; 5234 auto CalleeCC = CLI.CallConv; 5235 auto &Outs = CLI.Outs; 5236 auto &Caller = MF.getFunction(); 5237 auto CallerCC = Caller.getCallingConv(); 5238 5239 // Exception-handling functions need a special set of instructions to 5240 // indicate a return to the hardware. Tail-calling another function would 5241 // probably break this. 5242 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This 5243 // should be expanded as new function attributes are introduced. 5244 if (Caller.hasFnAttribute("interrupt")) 5245 return false; 5246 5247 // Do not tail call opt if the stack is used to pass parameters. 5248 if (CCInfo.getNextStackOffset() != 0) 5249 return false; 5250 5251 // Do not tail call opt if any parameters need to be passed indirectly. 5252 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are 5253 // passed indirectly. So the address of the value will be passed in a 5254 // register, or if not available, then the address is put on the stack. In 5255 // order to pass indirectly, space on the stack often needs to be allocated 5256 // in order to store the value. In this case the CCInfo.getNextStackOffset() 5257 // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs 5258 // are passed CCValAssign::Indirect. 5259 for (auto &VA : ArgLocs) 5260 if (VA.getLocInfo() == CCValAssign::Indirect) 5261 return false; 5262 5263 // Do not tail call opt if either caller or callee uses struct return 5264 // semantics. 5265 auto IsCallerStructRet = Caller.hasStructRetAttr(); 5266 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet(); 5267 if (IsCallerStructRet || IsCalleeStructRet) 5268 return false; 5269 5270 // Externally-defined functions with weak linkage should not be 5271 // tail-called. The behaviour of branch instructions in this situation (as 5272 // used for tail calls) is implementation-defined, so we cannot rely on the 5273 // linker replacing the tail call with a return. 5274 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 5275 const GlobalValue *GV = G->getGlobal(); 5276 if (GV->hasExternalWeakLinkage()) 5277 return false; 5278 } 5279 5280 // The callee has to preserve all registers the caller needs to preserve. 5281 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5282 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); 5283 if (CalleeCC != CallerCC) { 5284 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); 5285 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) 5286 return false; 5287 } 5288 5289 // Byval parameters hand the function a pointer directly into the stack area 5290 // we want to reuse during a tail call. Working around this *is* possible 5291 // but less efficient and uglier in LowerCall. 5292 for (auto &Arg : Outs) 5293 if (Arg.Flags.isByVal()) 5294 return false; 5295 5296 return true; 5297 } 5298 5299 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 5300 // and output parameter nodes. 
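// A rough sketch of the emitted sequence (operands simplified):
//   ch       = callseq_start ch, NumBytes, 0
//   ch, glue = CopyToReg ch, glue, argreg, argval       ; one per register arg
//   ch, glue = RISCVISD::CALL ch, callee, argregs..., regmask, glue
//   ch, glue = callseq_end ch, NumBytes, 0, glue
//   retval   = CopyFromReg ch, glue, retreg             ; one per return value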
5301 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 5302 SmallVectorImpl<SDValue> &InVals) const { 5303 SelectionDAG &DAG = CLI.DAG; 5304 SDLoc &DL = CLI.DL; 5305 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 5306 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 5307 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 5308 SDValue Chain = CLI.Chain; 5309 SDValue Callee = CLI.Callee; 5310 bool &IsTailCall = CLI.IsTailCall; 5311 CallingConv::ID CallConv = CLI.CallConv; 5312 bool IsVarArg = CLI.IsVarArg; 5313 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 5314 MVT XLenVT = Subtarget.getXLenVT(); 5315 5316 MachineFunction &MF = DAG.getMachineFunction(); 5317 5318 // Analyze the operands of the call, assigning locations to each operand. 5319 SmallVector<CCValAssign, 16> ArgLocs; 5320 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 5321 5322 if (CallConv == CallingConv::Fast) 5323 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC); 5324 else if (CallConv == CallingConv::GHC) 5325 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 5326 else 5327 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI); 5328 5329 // Check if it's really possible to do a tail call. 5330 if (IsTailCall) 5331 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 5332 5333 if (IsTailCall) 5334 ++NumTailCalls; 5335 else if (CLI.CB && CLI.CB->isMustTailCall()) 5336 report_fatal_error("failed to perform tail call elimination on a call " 5337 "site marked musttail"); 5338 5339 // Get a count of how many bytes are to be pushed on the stack. 5340 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 5341 5342 // Create local copies for byval args 5343 SmallVector<SDValue, 8> ByValArgs; 5344 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 5345 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5346 if (!Flags.isByVal()) 5347 continue; 5348 5349 SDValue Arg = OutVals[i]; 5350 unsigned Size = Flags.getByValSize(); 5351 Align Alignment = Flags.getNonZeroByValAlign(); 5352 5353 int FI = 5354 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 5355 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 5356 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 5357 5358 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 5359 /*IsVolatile=*/false, 5360 /*AlwaysInline=*/false, IsTailCall, 5361 MachinePointerInfo(), MachinePointerInfo()); 5362 ByValArgs.push_back(FIPtr); 5363 } 5364 5365 if (!IsTailCall) 5366 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 5367 5368 // Copy argument values to their designated locations. 5369 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 5370 SmallVector<SDValue, 8> MemOpChains; 5371 SDValue StackPtr; 5372 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 5373 CCValAssign &VA = ArgLocs[i]; 5374 SDValue ArgValue = OutVals[i]; 5375 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5376 5377 // Handle passing f64 on RV32D with a soft float ABI as a special case. 5378 bool IsF64OnRV32DSoftABI = 5379 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 5380 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 5381 SDValue SplitF64 = DAG.getNode( 5382 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 5383 SDValue Lo = SplitF64.getValue(0); 5384 SDValue Hi = SplitF64.getValue(1); 5385 5386 Register RegLo = VA.getLocReg(); 5387 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 5388 5389 if (RegLo == RISCV::X17) { 5390 // Second half of f64 is passed on the stack. 
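// (a7 is the last integer argument register, so once the low half lands
// there the high half has nowhere left to go but the stack.)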
5391 // Work out the address of the stack slot. 5392 if (!StackPtr.getNode()) 5393 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 5394 // Emit the store. 5395 MemOpChains.push_back( 5396 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 5397 } else { 5398 // Second half of f64 is passed in another GPR. 5399 assert(RegLo < RISCV::X31 && "Invalid register pair"); 5400 Register RegHigh = RegLo + 1; 5401 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 5402 } 5403 continue; 5404 } 5405 5406 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 5407 // as any other MemLoc. 5408 5409 // Promote the value if needed. 5410 // For now, only handle fully promoted and indirect arguments. 5411 if (VA.getLocInfo() == CCValAssign::Indirect) { 5412 // Store the argument in a stack slot and pass its address. 5413 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT); 5414 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 5415 MemOpChains.push_back( 5416 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 5417 MachinePointerInfo::getFixedStack(MF, FI))); 5418 // If the original argument was split (e.g. i128), we need 5419 // to store all parts of it here (and pass just one address). 5420 unsigned ArgIndex = Outs[i].OrigArgIndex; 5421 assert(Outs[i].PartOffset == 0); 5422 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 5423 SDValue PartValue = OutVals[i + 1]; 5424 unsigned PartOffset = Outs[i + 1].PartOffset; 5425 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, 5426 DAG.getIntPtrConstant(PartOffset, DL)); 5427 MemOpChains.push_back( 5428 DAG.getStore(Chain, DL, PartValue, Address, 5429 MachinePointerInfo::getFixedStack(MF, FI))); 5430 ++i; 5431 } 5432 ArgValue = SpillSlot; 5433 } else { 5434 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL); 5435 } 5436 5437 // Use local copy if it is a byval arg. 5438 if (Flags.isByVal()) 5439 ArgValue = ByValArgs[j++]; 5440 5441 if (VA.isRegLoc()) { 5442 // Queue up the argument copies and emit them at the end. 5443 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 5444 } else { 5445 assert(VA.isMemLoc() && "Argument not register or memory"); 5446 assert(!IsTailCall && "Tail call not allowed if stack is used " 5447 "for passing parameters"); 5448 5449 // Work out the address of the stack slot. 5450 if (!StackPtr.getNode()) 5451 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 5452 SDValue Address = 5453 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 5454 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 5455 5456 // Emit the store. 5457 MemOpChains.push_back( 5458 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 5459 } 5460 } 5461 5462 // Join the stores, which are independent of one another. 5463 if (!MemOpChains.empty()) 5464 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 5465 5466 SDValue Glue; 5467 5468 // Build a sequence of copy-to-reg nodes, chained and glued together. 5469 for (auto &Reg : RegsToPass) { 5470 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 5471 Glue = Chain.getValue(1); 5472 } 5473 5474 // Validate that none of the argument registers have been marked as 5475 // reserved, if so report an error. Do the same for the return address if this 5476 // is not a tailcall. 
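// (A non-tail call clobbers the return address register x1/ra, hence the
// extra check below.)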
5477 validateCCReservedRegs(RegsToPass, MF); 5478 if (!IsTailCall && 5479 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 5480 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 5481 MF.getFunction(), 5482 "Return address register required, but has been reserved."}); 5483 5484 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 5485 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 5486 // split it and then direct call can be matched by PseudoCALL. 5487 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 5488 const GlobalValue *GV = S->getGlobal(); 5489 5490 unsigned OpFlags = RISCVII::MO_CALL; 5491 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 5492 OpFlags = RISCVII::MO_PLT; 5493 5494 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 5495 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 5496 unsigned OpFlags = RISCVII::MO_CALL; 5497 5498 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 5499 nullptr)) 5500 OpFlags = RISCVII::MO_PLT; 5501 5502 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 5503 } 5504 5505 // The first call operand is the chain and the second is the target address. 5506 SmallVector<SDValue, 8> Ops; 5507 Ops.push_back(Chain); 5508 Ops.push_back(Callee); 5509 5510 // Add argument registers to the end of the list so that they are 5511 // known live into the call. 5512 for (auto &Reg : RegsToPass) 5513 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 5514 5515 if (!IsTailCall) { 5516 // Add a register mask operand representing the call-preserved registers. 5517 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5518 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 5519 assert(Mask && "Missing call preserved mask for calling convention"); 5520 Ops.push_back(DAG.getRegisterMask(Mask)); 5521 } 5522 5523 // Glue the call to the argument copies, if any. 5524 if (Glue.getNode()) 5525 Ops.push_back(Glue); 5526 5527 // Emit the call. 5528 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 5529 5530 if (IsTailCall) { 5531 MF.getFrameInfo().setHasTailCall(); 5532 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 5533 } 5534 5535 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 5536 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 5537 Glue = Chain.getValue(1); 5538 5539 // Mark the end of the call, which is glued to the call itself. 5540 Chain = DAG.getCALLSEQ_END(Chain, 5541 DAG.getConstant(NumBytes, DL, PtrVT, true), 5542 DAG.getConstant(0, DL, PtrVT, true), 5543 Glue, DL); 5544 Glue = Chain.getValue(1); 5545 5546 // Assign locations to each value returned by this call. 5547 SmallVector<CCValAssign, 16> RVLocs; 5548 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 5549 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true); 5550 5551 // Copy all of the result registers out of their specified physreg. 
5552 for (auto &VA : RVLocs) { 5553 // Copy the value out 5554 SDValue RetValue = 5555 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 5556 // Glue the RetValue to the end of the call sequence 5557 Chain = RetValue.getValue(1); 5558 Glue = RetValue.getValue(2); 5559 5560 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 5561 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 5562 SDValue RetValue2 = 5563 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 5564 Chain = RetValue2.getValue(1); 5565 Glue = RetValue2.getValue(2); 5566 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 5567 RetValue2); 5568 } 5569 5570 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL); 5571 5572 InVals.push_back(RetValue); 5573 } 5574 5575 return Chain; 5576 } 5577 5578 bool RISCVTargetLowering::CanLowerReturn( 5579 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 5580 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 5581 SmallVector<CCValAssign, 16> RVLocs; 5582 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 5583 5584 Optional<unsigned> FirstMaskArgument; 5585 if (Subtarget.hasStdExtV()) 5586 FirstMaskArgument = preAssignMask(Outs); 5587 5588 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 5589 MVT VT = Outs[i].VT; 5590 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 5591 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 5592 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 5593 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 5594 *this, FirstMaskArgument)) 5595 return false; 5596 } 5597 return true; 5598 } 5599 5600 SDValue 5601 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 5602 bool IsVarArg, 5603 const SmallVectorImpl<ISD::OutputArg> &Outs, 5604 const SmallVectorImpl<SDValue> &OutVals, 5605 const SDLoc &DL, SelectionDAG &DAG) const { 5606 const MachineFunction &MF = DAG.getMachineFunction(); 5607 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 5608 5609 // Stores the assignment of the return value to a location. 5610 SmallVector<CCValAssign, 16> RVLocs; 5611 5612 // Info about the registers and stack slot. 5613 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 5614 *DAG.getContext()); 5615 5616 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 5617 nullptr); 5618 5619 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 5620 report_fatal_error("GHC functions return void only"); 5621 5622 SDValue Glue; 5623 SmallVector<SDValue, 4> RetOps(1, Chain); 5624 5625 // Copy the result values into the output registers. 5626 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 5627 SDValue Val = OutVals[i]; 5628 CCValAssign &VA = RVLocs[i]; 5629 assert(VA.isRegLoc() && "Can only return in registers!"); 5630 5631 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 5632 // Handle returning f64 on RV32D with a soft float ABI. 
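// The double is split with SplitF64 and returned in an adjacent GPR pair
// (a0/a1 for the first return value), mirroring the argument-passing logic
// in LowerCall above.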
5633 assert(VA.isRegLoc() && "Expected return via registers"); 5634 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 5635 DAG.getVTList(MVT::i32, MVT::i32), Val); 5636 SDValue Lo = SplitF64.getValue(0); 5637 SDValue Hi = SplitF64.getValue(1); 5638 Register RegLo = VA.getLocReg(); 5639 assert(RegLo < RISCV::X31 && "Invalid register pair"); 5640 Register RegHi = RegLo + 1; 5641 5642 if (STI.isRegisterReservedByUser(RegLo) || 5643 STI.isRegisterReservedByUser(RegHi)) 5644 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 5645 MF.getFunction(), 5646 "Return value register required, but has been reserved."}); 5647 5648 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 5649 Glue = Chain.getValue(1); 5650 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 5651 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 5652 Glue = Chain.getValue(1); 5653 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 5654 } else { 5655 // Handle a 'normal' return. 5656 Val = convertValVTToLocVT(DAG, Val, VA, DL); 5657 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 5658 5659 if (STI.isRegisterReservedByUser(VA.getLocReg())) 5660 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 5661 MF.getFunction(), 5662 "Return value register required, but has been reserved."}); 5663 5664 // Guarantee that all emitted copies are stuck together. 5665 Glue = Chain.getValue(1); 5666 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 5667 } 5668 } 5669 5670 RetOps[0] = Chain; // Update chain. 5671 5672 // Add the glue node if we have it. 5673 if (Glue.getNode()) { 5674 RetOps.push_back(Glue); 5675 } 5676 5677 // Interrupt service routines use different return instructions. 5678 const Function &Func = DAG.getMachineFunction().getFunction(); 5679 if (Func.hasFnAttribute("interrupt")) { 5680 if (!Func.getReturnType()->isVoidTy()) 5681 report_fatal_error( 5682 "Functions with the interrupt attribute must have void return type!"); 5683 5684 MachineFunction &MF = DAG.getMachineFunction(); 5685 StringRef Kind = 5686 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 5687 5688 unsigned RetOpc; 5689 if (Kind == "user") 5690 RetOpc = RISCVISD::URET_FLAG; 5691 else if (Kind == "supervisor") 5692 RetOpc = RISCVISD::SRET_FLAG; 5693 else 5694 RetOpc = RISCVISD::MRET_FLAG; 5695 5696 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 5697 } 5698 5699 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps); 5700 } 5701 5702 void RISCVTargetLowering::validateCCReservedRegs( 5703 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 5704 MachineFunction &MF) const { 5705 const Function &F = MF.getFunction(); 5706 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 5707 5708 if (llvm::any_of(Regs, [&STI](auto Reg) { 5709 return STI.isRegisterReservedByUser(Reg.first); 5710 })) 5711 F.getContext().diagnose(DiagnosticInfoUnsupported{ 5712 F, "Argument register required, but has been reserved."}); 5713 } 5714 5715 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 5716 return CI->isTailCall(); 5717 } 5718 5719 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 5720 #define NODE_NAME_CASE(NODE) \ 5721 case RISCVISD::NODE: \ 5722 return "RISCVISD::" #NODE; 5723 // clang-format off 5724 switch ((RISCVISD::NodeType)Opcode) { 5725 case RISCVISD::FIRST_NUMBER: 5726 break; 5727 NODE_NAME_CASE(RET_FLAG) 5728 NODE_NAME_CASE(URET_FLAG) 5729 NODE_NAME_CASE(SRET_FLAG) 5730 
NODE_NAME_CASE(MRET_FLAG) 5731 NODE_NAME_CASE(CALL) 5732 NODE_NAME_CASE(SELECT_CC) 5733 NODE_NAME_CASE(BuildPairF64) 5734 NODE_NAME_CASE(SplitF64) 5735 NODE_NAME_CASE(TAIL) 5736 NODE_NAME_CASE(SLLW) 5737 NODE_NAME_CASE(SRAW) 5738 NODE_NAME_CASE(SRLW) 5739 NODE_NAME_CASE(DIVW) 5740 NODE_NAME_CASE(DIVUW) 5741 NODE_NAME_CASE(REMUW) 5742 NODE_NAME_CASE(ROLW) 5743 NODE_NAME_CASE(RORW) 5744 NODE_NAME_CASE(FSLW) 5745 NODE_NAME_CASE(FSRW) 5746 NODE_NAME_CASE(FSL) 5747 NODE_NAME_CASE(FSR) 5748 NODE_NAME_CASE(FMV_H_X) 5749 NODE_NAME_CASE(FMV_X_ANYEXTH) 5750 NODE_NAME_CASE(FMV_W_X_RV64) 5751 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 5752 NODE_NAME_CASE(READ_CYCLE_WIDE) 5753 NODE_NAME_CASE(GREVI) 5754 NODE_NAME_CASE(GREVIW) 5755 NODE_NAME_CASE(GORCI) 5756 NODE_NAME_CASE(GORCIW) 5757 NODE_NAME_CASE(SHFLI) 5758 NODE_NAME_CASE(VMV_V_X_VL) 5759 NODE_NAME_CASE(VFMV_V_F_VL) 5760 NODE_NAME_CASE(VMV_X_S) 5761 NODE_NAME_CASE(VMV_S_XF_VL) 5762 NODE_NAME_CASE(SPLAT_VECTOR_I64) 5763 NODE_NAME_CASE(READ_VLENB) 5764 NODE_NAME_CASE(TRUNCATE_VECTOR_VL) 5765 NODE_NAME_CASE(VLEFF) 5766 NODE_NAME_CASE(VLEFF_MASK) 5767 NODE_NAME_CASE(VSLIDEUP_VL) 5768 NODE_NAME_CASE(VSLIDEDOWN_VL) 5769 NODE_NAME_CASE(VID_VL) 5770 NODE_NAME_CASE(VFNCVT_ROD_VL) 5771 NODE_NAME_CASE(VECREDUCE_ADD) 5772 NODE_NAME_CASE(VECREDUCE_UMAX) 5773 NODE_NAME_CASE(VECREDUCE_SMAX) 5774 NODE_NAME_CASE(VECREDUCE_UMIN) 5775 NODE_NAME_CASE(VECREDUCE_SMIN) 5776 NODE_NAME_CASE(VECREDUCE_AND) 5777 NODE_NAME_CASE(VECREDUCE_OR) 5778 NODE_NAME_CASE(VECREDUCE_XOR) 5779 NODE_NAME_CASE(VECREDUCE_FADD) 5780 NODE_NAME_CASE(VECREDUCE_SEQ_FADD) 5781 NODE_NAME_CASE(ADD_VL) 5782 NODE_NAME_CASE(AND_VL) 5783 NODE_NAME_CASE(MUL_VL) 5784 NODE_NAME_CASE(OR_VL) 5785 NODE_NAME_CASE(SDIV_VL) 5786 NODE_NAME_CASE(SHL_VL) 5787 NODE_NAME_CASE(SREM_VL) 5788 NODE_NAME_CASE(SRA_VL) 5789 NODE_NAME_CASE(SRL_VL) 5790 NODE_NAME_CASE(SUB_VL) 5791 NODE_NAME_CASE(UDIV_VL) 5792 NODE_NAME_CASE(UREM_VL) 5793 NODE_NAME_CASE(XOR_VL) 5794 NODE_NAME_CASE(FADD_VL) 5795 NODE_NAME_CASE(FSUB_VL) 5796 NODE_NAME_CASE(FMUL_VL) 5797 NODE_NAME_CASE(FDIV_VL) 5798 NODE_NAME_CASE(FNEG_VL) 5799 NODE_NAME_CASE(FABS_VL) 5800 NODE_NAME_CASE(FSQRT_VL) 5801 NODE_NAME_CASE(FMA_VL) 5802 NODE_NAME_CASE(SMIN_VL) 5803 NODE_NAME_CASE(SMAX_VL) 5804 NODE_NAME_CASE(UMIN_VL) 5805 NODE_NAME_CASE(UMAX_VL) 5806 NODE_NAME_CASE(MULHS_VL) 5807 NODE_NAME_CASE(MULHU_VL) 5808 NODE_NAME_CASE(FP_TO_SINT_VL) 5809 NODE_NAME_CASE(FP_TO_UINT_VL) 5810 NODE_NAME_CASE(SINT_TO_FP_VL) 5811 NODE_NAME_CASE(UINT_TO_FP_VL) 5812 NODE_NAME_CASE(FP_EXTEND_VL) 5813 NODE_NAME_CASE(FP_ROUND_VL) 5814 NODE_NAME_CASE(SETCC_VL) 5815 NODE_NAME_CASE(VSELECT_VL) 5816 NODE_NAME_CASE(VMAND_VL) 5817 NODE_NAME_CASE(VMOR_VL) 5818 NODE_NAME_CASE(VMXOR_VL) 5819 NODE_NAME_CASE(VMCLR_VL) 5820 NODE_NAME_CASE(VMSET_VL) 5821 NODE_NAME_CASE(VRGATHER_VX_VL) 5822 NODE_NAME_CASE(VSEXT_VL) 5823 NODE_NAME_CASE(VZEXT_VL) 5824 NODE_NAME_CASE(VLE_VL) 5825 NODE_NAME_CASE(VSE_VL) 5826 } 5827 // clang-format on 5828 return nullptr; 5829 #undef NODE_NAME_CASE 5830 } 5831 5832 /// getConstraintType - Given a constraint letter, return the type of 5833 /// constraint it is for this target. 

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'f':
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
        return std::make_pair(0U, &RISCV::FPR16RegClass);
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
  unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
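
  // Illustrative note: with the alias handling above, an IR-level register
  // constraint written with an ABI name (a sketch, not from this file):
  //
  //   call void asm sideeffect "ecall", "{a7},{a0}"(i64 %nr, i64 %arg)
  //
  // resolves "{a7}" to X17 and "{a0}" to X10 even when the frontend passed
  // the ABI names through verbatim (as rustc does).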

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and we want to match those names to the widest floating-point
  // register type available, manually select floating-point registers here.
  //
  // The second case in each entry below is the ABI name of the register, so
  // that frontends can also use the ABI names in register constraint lists.
  if (Subtarget.hasStdExtF()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD()) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      return std::make_pair(FReg, &RISCV::FPR32RegClass);
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
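
// Illustrative note: because the FPR aliases above are matched against the
// widest floating-point register class available, a constraint string such as
// "{fa0}" (or the architectural name "{f10}") resolves to F10_D/FPR64RegClass
// when the D extension is present and to F10_F/FPR32RegClass on an F-only
// target. A rough IR-level sketch (instruction and value names are only an
// example):
//
//   %r = call double asm "fmv.d $0, $1", "={fa0},f"(double %x)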

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as
  // floating-point operations cannot be used in an LR/SC sequence without
  // breaking the forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}
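
// Illustrative note: for the MaskedIntrinsic expansion selected above, an i8
// "atomicrmw add" is rewritten by AtomicExpandPass into a call to the masked
// intrinsic chosen here, roughly (a sketch for RV32, names abbreviated):
//
//   %aligned = and %addr, ~3                 ; word-align the address
//   %shamt   = shl (and %addr, 3), 3         ; bit offset of the byte in word
//   %mask    = shl i32 255, %shamt
//   %incr    = shl i32 %val, %shamt
//   %res     = call i32 @llvm.riscv.masked.atomicrmw.add.i32(
//                  i32* %aligned, i32 %incr, i32 %mask, i32 <ordering>)
//
// emitMaskedAtomicRMWIntrinsic below only emits the final call (plus the
// extra sign-extension shift amount for signed min/max); the address and mask
// computation is done by the generic AtomicExpandPass.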

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}
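
// Illustrative note: the masked cmpxchg path mirrors the masked atomicrmw
// path above. For an i16 cmpxchg on RV64, AtomicExpandPass provides the
// aligned address, the shifted compare/new values and the mask, and the
// helper above then emits, roughly (a sketch only, types abbreviated):
//
//   %res = call i64 @llvm.riscv.masked.cmpxchg.i64(<aligned ptr>,
//              i64 %cmp.sext, i64 %new.sext, i64 %mask.sext, i64 <ordering>)
//   %trunc = trunc i64 %res to i32
//
// matching the XLen == 64 sign extensions and truncation in
// emitMaskedAtomicCmpXchgIntrinsic.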

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is an f32 under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into a SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}
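
// Illustrative note on decomposeMulByConstant (a worked example, not extra
// logic): with Imm = 5, (Imm - 1) = 4 is a power of two, so the hook returns
// true and the multiply can later be selected as shift-and-add, roughly:
//
//   %y = mul i32 %x, 5   ==>   slli a1, a0, 2
//                              add  a0, a1, a0
//
// With Imm = 6144 (= 3 << 11), Imm is not a signed 12-bit immediate and has
// 11 trailing zeros, so ImmS = 3 and (ImmS - 1) = 2 is a power of two: two
// SLLIs plus an ADD can replace the LUI/ADDI + MUL sequence.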

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  if (!VT.isFixedLengthVector())
    return false;

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (VT.getVectorElementType().SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
      return false;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    break;
  case MVT::f16:
    if (!Subtarget.hasStdExtZfh())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasStdExtF())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasStdExtD())
      return false;
    break;
  }

  unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isScalableVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm