1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
174                    MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
188 
189   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
190   if (!Subtarget.hasStdExtZbb())
191     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
192 
193   if (Subtarget.is64Bit()) {
194     setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
195                        MVT::i32, Custom);
196 
197     setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
198                        MVT::i32, Custom);
199   } else {
200     setLibcallName(
201         {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
202         nullptr);
203     setLibcallName(RTLIB::MULO_I64, nullptr);
204   }
205 
206   if (!Subtarget.hasStdExtM()) {
207     setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
208                         ISD::SREM, ISD::UREM},
209                        XLenVT, Expand);
210   } else {
211     if (Subtarget.is64Bit()) {
212       setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);
213 
214       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
215                          {MVT::i8, MVT::i16, MVT::i32}, Custom);
216     } else {
217       setOperationAction(ISD::MUL, MVT::i64, Custom);
218     }
219   }
220 
221   setOperationAction(
222       {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
223       Expand);
224 
225   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
226                      Custom);
227 
228   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
229       Subtarget.hasStdExtZbkb()) {
230     if (Subtarget.is64Bit())
231       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
232   } else {
233     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
234   }
235 
236   if (Subtarget.hasStdExtZbp()) {
237     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
238     // more combining.
239     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);
240 
241     // BSWAP i8 doesn't exist.
242     setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
243 
244     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);
245 
246     if (Subtarget.is64Bit())
247       setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
248   } else {
249     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
250     // pattern match it directly in isel.
251     setOperationAction(ISD::BSWAP, XLenVT,
252                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
253                            ? Legal
254                            : Expand);
255     // Zbkb can use rev8+brev8 to implement bitreverse.
256     setOperationAction(ISD::BITREVERSE, XLenVT,
257                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
258   }
259 
260   if (Subtarget.hasStdExtZbb()) {
261     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
262                        Legal);
263 
264     if (Subtarget.is64Bit())
265       setOperationAction(
266           {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
267           MVT::i32, Custom);
268   } else {
269     setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);
270 
271     if (Subtarget.is64Bit())
272       setOperationAction(ISD::ABS, MVT::i32, Custom);
273   }
274 
275   if (Subtarget.hasStdExtZbt()) {
276     setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
277     setOperationAction(ISD::SELECT, XLenVT, Legal);
278 
279     if (Subtarget.is64Bit())
280       setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
281   } else {
282     setOperationAction(ISD::SELECT, XLenVT, Custom);
283   }
284 
285   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
286       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
287       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
288       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
289       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
290       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
291       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
292 
293   static const ISD::CondCode FPCCToExpand[] = {
294       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
295       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
296       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
297 
298   static const ISD::NodeType FPOpToExpand[] = {
299       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
300       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
301 
302   if (Subtarget.hasStdExtZfh())
303     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
304 
305   if (Subtarget.hasStdExtZfh()) {
306     for (auto NT : FPLegalNodeTypes)
307       setOperationAction(NT, MVT::f16, Legal);
308     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
309     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
310     for (auto CC : FPCCToExpand)
311       setCondCodeAction(CC, MVT::f16, Expand);
312     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
313     setOperationAction(ISD::SELECT, MVT::f16, Custom);
314     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
315 
316     setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
317                         ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
318                         ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
319                         ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
320                         ISD::FLOG2, ISD::FLOG10},
321                        MVT::f16, Promote);
322 
323     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
324     // complete support for all operations in LegalizeDAG.
325 
326     // We need to custom promote this.
327     if (Subtarget.is64Bit())
328       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
329   }
330 
331   if (Subtarget.hasStdExtF()) {
332     for (auto NT : FPLegalNodeTypes)
333       setOperationAction(NT, MVT::f32, Legal);
334     for (auto CC : FPCCToExpand)
335       setCondCodeAction(CC, MVT::f32, Expand);
336     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
337     setOperationAction(ISD::SELECT, MVT::f32, Custom);
338     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
339     for (auto Op : FPOpToExpand)
340       setOperationAction(Op, MVT::f32, Expand);
341     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
342     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
343   }
344 
345   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
346     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
347 
348   if (Subtarget.hasStdExtD()) {
349     for (auto NT : FPLegalNodeTypes)
350       setOperationAction(NT, MVT::f64, Legal);
351     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
352     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
353     for (auto CC : FPCCToExpand)
354       setCondCodeAction(CC, MVT::f64, Expand);
355     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
356     setOperationAction(ISD::SELECT, MVT::f64, Custom);
357     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
358     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
359     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
360     for (auto Op : FPOpToExpand)
361       setOperationAction(Op, MVT::f64, Expand);
362     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
363     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
364   }
365 
366   if (Subtarget.is64Bit())
367     setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
368                         ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
369                        MVT::i32, Custom);
370 
371   if (Subtarget.hasStdExtF()) {
372     setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
373                        Custom);
374 
375     setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
376                         ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
377                        XLenVT, Legal);
378 
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
384                       ISD::JumpTable},
385                      XLenVT, Custom);
386 
387   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
388 
389   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
390   // Unfortunately this can't be determined just from the ISA naming string.
391   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
392                      Subtarget.is64Bit() ? Legal : Custom);
393 
394   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
395   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
396   if (Subtarget.is64Bit())
397     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
398 
399   if (Subtarget.hasStdExtA()) {
400     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
401     setMinCmpXchgSizeInBits(32);
402   } else {
403     setMaxAtomicSizeInBitsSupported(0);
404   }
405 
406   setBooleanContents(ZeroOrOneBooleanContent);
407 
408   if (Subtarget.hasVInstructions()) {
409     setBooleanVectorContents(ZeroOrOneBooleanContent);
410 
411     setOperationAction(ISD::VSCALE, XLenVT, Custom);
412 
413     // RVV intrinsics may have illegal operands.
414     // We also need to custom legalize vmv.x.s.
415     setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
416                        {MVT::i8, MVT::i16}, Custom);
417     if (Subtarget.is64Bit())
418       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
419     else
420       setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
421                          MVT::i64, Custom);
422 
423     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
424                        MVT::Other, Custom);
425 
426     static const unsigned IntegerVPOps[] = {
427         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
428         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
429         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
430         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
431         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
432         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
433         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
434         ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
435         ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SEXT,
436         ISD::VP_ZEXT,        ISD::VP_TRUNC};
437 
438     static const unsigned FloatingPointVPOps[] = {
439         ISD::VP_FADD,        ISD::VP_FSUB,
440         ISD::VP_FMUL,        ISD::VP_FDIV,
441         ISD::VP_FNEG,        ISD::VP_FMA,
442         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
443         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
444         ISD::VP_MERGE,       ISD::VP_SELECT,
445         ISD::VP_SITOFP,      ISD::VP_UITOFP,
446         ISD::VP_SETCC,       ISD::VP_FP_ROUND};
447 
448     if (!Subtarget.is64Bit()) {
449       // We must custom-lower certain vXi64 operations on RV32 due to the vector
450       // element type being illegal.
451       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
452                          MVT::i64, Custom);
453 
454       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
455                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
456                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
457                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
458                          MVT::i64, Custom);
459 
460       setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
461                           ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
462                           ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
463                           ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
464                          MVT::i64, Custom);
465     }
466 
467     for (MVT VT : BoolVecVTs) {
468       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
469 
470       // Mask VTs are custom-expanded into a series of standard nodes
471       setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
472                           ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
473                          VT, Custom);
474 
475       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
476                          Custom);
477 
478       setOperationAction(ISD::SELECT, VT, Custom);
479       setOperationAction(
480           {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
481           Expand);
482 
483       setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
484 
485       setOperationAction(
486           {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
487           Custom);
488 
489       setOperationAction(
490           {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
491           Custom);
492 
493       // RVV has native int->float & float->int conversions where the
494       // element type sizes are within one power-of-two of each other. Any
495       // wider distances between type sizes have to be lowered as sequences
496       // which progressively narrow the gap in stages.
497       setOperationAction(
498           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
499           VT, Custom);
500 
501       // Expand all extending loads to types larger than this, and truncating
502       // stores from types larger than this.
503       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
504         setTruncStoreAction(OtherVT, VT, Expand);
505         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
506                          VT, Expand);
507       }
508 
509       setOperationAction(
510           {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNC, ISD::VP_SETCC}, VT,
511           Custom);
512     }
513 
514     for (MVT VT : IntVecVTs) {
515       if (VT.getVectorElementType() == MVT::i64 &&
516           !Subtarget.hasVInstructionsI64())
517         continue;
518 
519       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
520       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
521 
522       // Vectors implement MULHS/MULHU.
523       setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
524 
525       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
526       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
527         setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
528 
529       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
530                          Legal);
531 
532       setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
533 
534       setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
535                          Expand);
536 
537       setOperationAction(ISD::BSWAP, VT, Expand);
538 
539       // Custom-lower extensions and truncations from/to mask types.
540       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
541                          VT, Custom);
542 
543       // RVV has native int->float & float->int conversions where the
544       // element type sizes are within one power-of-two of each other. Any
545       // wider distances between type sizes have to be lowered as sequences
546       // which progressively narrow the gap in stages.
547       setOperationAction(
548           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
549           VT, Custom);
550 
551       setOperationAction(
552           {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
553 
554       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
555       // nodes which truncate by one power of two at a time.
556       setOperationAction(ISD::TRUNCATE, VT, Custom);
557 
558       // Custom-lower insert/extract operations to simplify patterns.
559       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
560                          Custom);
561 
562       // Custom-lower reduction operations to set up the corresponding custom
563       // nodes' operands.
564       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
565                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
566                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
567                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
568                          VT, Custom);
569 
570       for (unsigned VPOpc : IntegerVPOps)
571         setOperationAction(VPOpc, VT, Custom);
572 
573       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
574 
575       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
576                          VT, Custom);
577 
578       setOperationAction(
579           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
580           Custom);
581 
582       setOperationAction(
583           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
584           VT, Custom);
585 
586       setOperationAction(ISD::SELECT, VT, Custom);
587       setOperationAction(ISD::SELECT_CC, VT, Expand);
588 
589       setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
590 
591       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
592         setTruncStoreAction(VT, OtherVT, Expand);
593         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
594                          VT, Expand);
595       }
596 
597       // Splice
598       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
599 
600       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
601       // type that can represent the value exactly.
602       if (VT.getVectorElementType() != MVT::i64) {
603         MVT FloatEltVT =
604             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
605         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
606         if (isTypeLegal(FloatVT)) {
607           setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
608                              Custom);
609         }
610       }
611     }
612 
613     // Expand various CCs to best match the RVV ISA, which natively supports UNE
614     // but no other unordered comparisons, and supports all ordered comparisons
615     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
616     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
617     // and we pattern-match those back to the "original", swapping operands once
618     // more. This way we catch both operations and both "vf" and "fv" forms with
619     // fewer patterns.
620     static const ISD::CondCode VFPCCToExpand[] = {
621         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
622         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
623         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
624     };
625 
626     // Sets common operation actions on RVV floating-point vector types.
627     const auto SetCommonVFPActions = [&](MVT VT) {
628       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
629       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
630       // sizes are within one power-of-two of each other. Therefore conversions
631       // between vXf16 and vXf64 must be lowered as sequences which convert via
632       // vXf32.
633       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
634       // Custom-lower insert/extract operations to simplify patterns.
635       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
636                          Custom);
637       // Expand various condition codes (explained above).
638       for (auto CC : VFPCCToExpand)
639         setCondCodeAction(CC, VT, Expand);
640 
641       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
642 
643       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
644                          VT, Custom);
645 
646       setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
647                           ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
648                          VT, Custom);
649 
650       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
651 
652       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
653 
654       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
655                          VT, Custom);
656 
657       setOperationAction(
658           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
659           Custom);
660 
661       setOperationAction(ISD::SELECT, VT, Custom);
662       setOperationAction(ISD::SELECT_CC, VT, Expand);
663 
664       setOperationAction(
665           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
666           VT, Custom);
667 
668       setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
669 
670       for (unsigned VPOpc : FloatingPointVPOps)
671         setOperationAction(VPOpc, VT, Custom);
672     };
673 
674     // Sets common extload/truncstore actions on RVV floating-point vector
675     // types.
676     const auto SetCommonVFPExtLoadTruncStoreActions =
677         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
678           for (auto SmallVT : SmallerVTs) {
679             setTruncStoreAction(VT, SmallVT, Expand);
680             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
681           }
682         };
683 
684     if (Subtarget.hasVInstructionsF16())
685       for (MVT VT : F16VecVTs)
686         SetCommonVFPActions(VT);
687 
688     for (MVT VT : F32VecVTs) {
689       if (Subtarget.hasVInstructionsF32())
690         SetCommonVFPActions(VT);
691       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
692     }
693 
694     for (MVT VT : F64VecVTs) {
695       if (Subtarget.hasVInstructionsF64())
696         SetCommonVFPActions(VT);
697       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
698       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
699     }
700 
701     if (Subtarget.useRVVForFixedLengthVectors()) {
702       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
703         if (!useRVVForFixedLengthVectorVT(VT))
704           continue;
705 
706         // By default everything must be expanded.
707         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
708           setOperationAction(Op, VT, Expand);
709         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
710           setTruncStoreAction(VT, OtherVT, Expand);
711           setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
712                            OtherVT, VT, Expand);
713         }
714 
715         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
716         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
717                            Custom);
718 
719         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
720                            Custom);
721 
722         setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
723                            VT, Custom);
724 
725         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
726 
727         setOperationAction(ISD::SETCC, VT, Custom);
728 
729         setOperationAction(ISD::SELECT, VT, Custom);
730 
731         setOperationAction(ISD::TRUNCATE, VT, Custom);
732 
733         setOperationAction(ISD::BITCAST, VT, Custom);
734 
735         setOperationAction(
736             {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
737             Custom);
738 
739         setOperationAction(
740             {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
741             Custom);
742 
743         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
744                             ISD::FP_TO_UINT},
745                            VT, Custom);
746 
747         // Operations below are different for between masks and other vectors.
748         if (VT.getVectorElementType() == MVT::i1) {
749           setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
750                               ISD::OR, ISD::XOR},
751                              VT, Custom);
752 
753           setOperationAction(
754               {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNC},
755               VT, Custom);
756           continue;
757         }
758 
759         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
760         // it before type legalization for i64 vectors on RV32. It will then be
761         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
762         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
763         // improvements first.
764         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
765           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
766           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
767         }
768 
769         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
770         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
771 
772         setOperationAction(
773             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
774 
775         setOperationAction(
776             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
777             Custom);
778 
779         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
780                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
781                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
782                            VT, Custom);
783 
784         setOperationAction(
785             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
786 
787         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
788         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
789           setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
790 
791         setOperationAction(
792             {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
793             Custom);
794 
795         setOperationAction(ISD::VSELECT, VT, Custom);
796         setOperationAction(ISD::SELECT_CC, VT, Expand);
797 
798         setOperationAction(
799             {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
800 
801         // Custom-lower reduction operations to set up the corresponding custom
802         // nodes' operands.
803         setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
804                             ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
805                             ISD::VECREDUCE_UMIN},
806                            VT, Custom);
807 
808         for (unsigned VPOpc : IntegerVPOps)
809           setOperationAction(VPOpc, VT, Custom);
810 
811         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
812         // type that can represent the value exactly.
813         if (VT.getVectorElementType() != MVT::i64) {
814           MVT FloatEltVT =
815               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
816           EVT FloatVT =
817               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
818           if (isTypeLegal(FloatVT))
819             setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
820                                Custom);
821         }
822       }
823 
824       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
825         if (!useRVVForFixedLengthVectorVT(VT))
826           continue;
827 
828         // By default everything must be expanded.
829         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
830           setOperationAction(Op, VT, Expand);
831         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
832           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
833           setTruncStoreAction(VT, OtherVT, Expand);
834         }
835 
836         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
837         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
838                            Custom);
839 
840         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
841                             ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
842                             ISD::EXTRACT_VECTOR_ELT},
843                            VT, Custom);
844 
845         setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
846                             ISD::MGATHER, ISD::MSCATTER},
847                            VT, Custom);
848 
849         setOperationAction(
850             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
851             Custom);
852 
853         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
854                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
855                             ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
856                            VT, Custom);
857 
858         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
859 
860         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
861                            VT, Custom);
862 
863         for (auto CC : VFPCCToExpand)
864           setCondCodeAction(CC, VT, Expand);
865 
866         setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
867         setOperationAction(ISD::SELECT_CC, VT, Expand);
868 
869         setOperationAction(ISD::BITCAST, VT, Custom);
870 
871         setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
872                             ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
873                            VT, Custom);
874 
875         for (unsigned VPOpc : FloatingPointVPOps)
876           setOperationAction(VPOpc, VT, Custom);
877       }
878 
879       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
880       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
881                          Custom);
882       if (Subtarget.hasStdExtZfh())
883         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
884       if (Subtarget.hasStdExtF())
885         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
886       if (Subtarget.hasStdExtD())
887         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
888     }
889   }
890 
891   // Function alignments.
892   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
893   setMinFunctionAlignment(FunctionAlignment);
894   setPrefFunctionAlignment(FunctionAlignment);
895 
896   setMinimumJumpTableEntries(5);
897 
898   // Jumps are expensive, compared to logic
899   setJumpIsExpensive();
900 
901   setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
902                        ISD::OR, ISD::XOR});
903 
904   if (Subtarget.hasStdExtF())
905     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
906 
907   if (Subtarget.hasStdExtZbp())
908     setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
909 
910   if (Subtarget.hasStdExtZbb())
911     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
912 
913   if (Subtarget.hasStdExtZbkb())
914     setTargetDAGCombine(ISD::BITREVERSE);
915   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
916     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
917   if (Subtarget.hasStdExtF())
918     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
919                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
920   if (Subtarget.hasVInstructions())
921     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
922                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
923                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
924 
925   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
926   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
927 }
928 
929 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
930                                             LLVMContext &Context,
931                                             EVT VT) const {
932   if (!VT.isVector())
933     return getPointerTy(DL);
934   if (Subtarget.hasVInstructions() &&
935       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
936     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
937   return VT.changeVectorElementTypeToInteger();
938 }
939 
940 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
941   return Subtarget.getXLenVT();
942 }
943 
944 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
945                                              const CallInst &I,
946                                              MachineFunction &MF,
947                                              unsigned Intrinsic) const {
948   auto &DL = I.getModule()->getDataLayout();
949   switch (Intrinsic) {
950   default:
951     return false;
952   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
953   case Intrinsic::riscv_masked_atomicrmw_add_i32:
954   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
955   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
956   case Intrinsic::riscv_masked_atomicrmw_max_i32:
957   case Intrinsic::riscv_masked_atomicrmw_min_i32:
958   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
959   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
960   case Intrinsic::riscv_masked_cmpxchg_i32:
961     Info.opc = ISD::INTRINSIC_W_CHAIN;
962     Info.memVT = MVT::i32;
963     Info.ptrVal = I.getArgOperand(0);
964     Info.offset = 0;
965     Info.align = Align(4);
966     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
967                  MachineMemOperand::MOVolatile;
968     return true;
969   case Intrinsic::riscv_masked_strided_load:
970     Info.opc = ISD::INTRINSIC_W_CHAIN;
971     Info.ptrVal = I.getArgOperand(1);
972     Info.memVT = getValueType(DL, I.getType()->getScalarType());
973     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
974     Info.size = MemoryLocation::UnknownSize;
975     Info.flags |= MachineMemOperand::MOLoad;
976     return true;
977   case Intrinsic::riscv_masked_strided_store:
978     Info.opc = ISD::INTRINSIC_VOID;
979     Info.ptrVal = I.getArgOperand(1);
980     Info.memVT =
981         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
982     Info.align = Align(
983         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
984         8);
985     Info.size = MemoryLocation::UnknownSize;
986     Info.flags |= MachineMemOperand::MOStore;
987     return true;
988   case Intrinsic::riscv_seg2_load:
989   case Intrinsic::riscv_seg3_load:
990   case Intrinsic::riscv_seg4_load:
991   case Intrinsic::riscv_seg5_load:
992   case Intrinsic::riscv_seg6_load:
993   case Intrinsic::riscv_seg7_load:
994   case Intrinsic::riscv_seg8_load:
995     Info.opc = ISD::INTRINSIC_W_CHAIN;
996     Info.ptrVal = I.getArgOperand(0);
997     Info.memVT =
998         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
999     Info.align =
1000         Align(DL.getTypeSizeInBits(
1001                   I.getType()->getStructElementType(0)->getScalarType()) /
1002               8);
1003     Info.size = MemoryLocation::UnknownSize;
1004     Info.flags |= MachineMemOperand::MOLoad;
1005     return true;
1006   }
1007 }
1008 
1009 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1010                                                 const AddrMode &AM, Type *Ty,
1011                                                 unsigned AS,
1012                                                 Instruction *I) const {
1013   // No global is ever allowed as a base.
1014   if (AM.BaseGV)
1015     return false;
1016 
1017   // Require a 12-bit signed offset.
1018   if (!isInt<12>(AM.BaseOffs))
1019     return false;
1020 
1021   switch (AM.Scale) {
1022   case 0: // "r+i" or just "i", depending on HasBaseReg.
1023     break;
1024   case 1:
1025     if (!AM.HasBaseReg) // allow "r+i".
1026       break;
1027     return false; // disallow "r+r" or "r+r+i".
1028   default:
1029     return false;
1030   }
1031 
1032   return true;
1033 }
1034 
1035 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1036   return isInt<12>(Imm);
1037 }
1038 
1039 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1040   return isInt<12>(Imm);
1041 }
1042 
1043 // On RV32, 64-bit integers are split into their high and low parts and held
1044 // in two different registers, so the trunc is free since the low register can
1045 // just be used.
1046 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1047   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1048     return false;
1049   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1050   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1051   return (SrcBits == 64 && DestBits == 32);
1052 }
1053 
1054 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1055   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1056       !SrcVT.isInteger() || !DstVT.isInteger())
1057     return false;
1058   unsigned SrcBits = SrcVT.getSizeInBits();
1059   unsigned DestBits = DstVT.getSizeInBits();
1060   return (SrcBits == 64 && DestBits == 32);
1061 }
1062 
1063 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1064   // Zexts are free if they can be combined with a load.
1065   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1066   // poorly with type legalization of compares preferring sext.
1067   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1068     EVT MemVT = LD->getMemoryVT();
1069     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1070         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1071          LD->getExtensionType() == ISD::ZEXTLOAD))
1072       return true;
1073   }
1074 
1075   return TargetLowering::isZExtFree(Val, VT2);
1076 }
1077 
1078 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1079   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1080 }
1081 
1082 bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
1083   return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
1084 }
1085 
1086 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1087   return Subtarget.hasStdExtZbb();
1088 }
1089 
1090 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1091   return Subtarget.hasStdExtZbb();
1092 }
1093 
1094 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1095   EVT VT = Y.getValueType();
1096 
1097   // FIXME: Support vectors once we have tests.
1098   if (VT.isVector())
1099     return false;
1100 
1101   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1102           Subtarget.hasStdExtZbkb()) &&
1103          !isa<ConstantSDNode>(Y);
1104 }
1105 
1106 bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
1107   // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
1108   auto *C = dyn_cast<ConstantSDNode>(Y);
1109   return C && C->getAPIntValue().ule(10);
1110 }
1111 
1112 /// Check if sinking \p I's operands to I's basic block is profitable, because
1113 /// the operands can be folded into a target instruction, e.g.
1114 /// splats of scalars can fold into vector instructions.
1115 bool RISCVTargetLowering::shouldSinkOperands(
1116     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1117   using namespace llvm::PatternMatch;
1118 
1119   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1120     return false;
1121 
1122   auto IsSinker = [&](Instruction *I, int Operand) {
1123     switch (I->getOpcode()) {
1124     case Instruction::Add:
1125     case Instruction::Sub:
1126     case Instruction::Mul:
1127     case Instruction::And:
1128     case Instruction::Or:
1129     case Instruction::Xor:
1130     case Instruction::FAdd:
1131     case Instruction::FSub:
1132     case Instruction::FMul:
1133     case Instruction::FDiv:
1134     case Instruction::ICmp:
1135     case Instruction::FCmp:
1136       return true;
1137     case Instruction::Shl:
1138     case Instruction::LShr:
1139     case Instruction::AShr:
1140     case Instruction::UDiv:
1141     case Instruction::SDiv:
1142     case Instruction::URem:
1143     case Instruction::SRem:
1144       return Operand == 1;
1145     case Instruction::Call:
1146       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1147         switch (II->getIntrinsicID()) {
1148         case Intrinsic::fma:
1149         case Intrinsic::vp_fma:
1150           return Operand == 0 || Operand == 1;
1151         // FIXME: Our patterns can only match vx/vf instructions when the splat
1152         // it on the RHS, because TableGen doesn't recognize our VP operations
1153         // as commutative.
1154         case Intrinsic::vp_add:
1155         case Intrinsic::vp_mul:
1156         case Intrinsic::vp_and:
1157         case Intrinsic::vp_or:
1158         case Intrinsic::vp_xor:
1159         case Intrinsic::vp_fadd:
1160         case Intrinsic::vp_fmul:
1161         case Intrinsic::vp_shl:
1162         case Intrinsic::vp_lshr:
1163         case Intrinsic::vp_ashr:
1164         case Intrinsic::vp_udiv:
1165         case Intrinsic::vp_sdiv:
1166         case Intrinsic::vp_urem:
1167         case Intrinsic::vp_srem:
1168           return Operand == 1;
1169         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1170         // explicit patterns for both LHS and RHS (as 'vr' versions).
1171         case Intrinsic::vp_sub:
1172         case Intrinsic::vp_fsub:
1173         case Intrinsic::vp_fdiv:
1174           return Operand == 0 || Operand == 1;
1175         default:
1176           return false;
1177         }
1178       }
1179       return false;
1180     default:
1181       return false;
1182     }
1183   };
1184 
1185   for (auto OpIdx : enumerate(I->operands())) {
1186     if (!IsSinker(I, OpIdx.index()))
1187       continue;
1188 
1189     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1190     // Make sure we are not already sinking this operand
1191     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1192       continue;
1193 
1194     // We are looking for a splat that can be sunk.
1195     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1196                              m_Undef(), m_ZeroMask())))
1197       continue;
1198 
1199     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1200     // and vector registers
1201     for (Use &U : Op->uses()) {
1202       Instruction *Insn = cast<Instruction>(U.getUser());
1203       if (!IsSinker(Insn, U.getOperandNo()))
1204         return false;
1205     }
1206 
1207     Ops.push_back(&Op->getOperandUse(0));
1208     Ops.push_back(&OpIdx.value());
1209   }
1210   return true;
1211 }
1212 
1213 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1214                                        bool ForCodeSize) const {
1215   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1216   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1217     return false;
1218   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1219     return false;
1220   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1221     return false;
1222   return Imm.isZero();
1223 }
1224 
1225 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1226   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1227          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1228          (VT == MVT::f64 && Subtarget.hasStdExtD());
1229 }
1230 
1231 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1232                                                       CallingConv::ID CC,
1233                                                       EVT VT) const {
1234   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1235   // We might still end up using a GPR but that will be decided based on ABI.
1236   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1237   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1238     return MVT::f32;
1239 
1240   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1241 }
1242 
1243 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1244                                                            CallingConv::ID CC,
1245                                                            EVT VT) const {
1246   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1247   // We might still end up using a GPR but that will be decided based on ABI.
1248   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1249   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1250     return 1;
1251 
1252   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1253 }
1254 
1255 // Changes the condition code and swaps operands if necessary, so the SetCC
1256 // operation matches one of the comparisons supported directly by branches
1257 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1258 // with 1/-1.
1259 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1260                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1261   // Convert X > -1 to X >= 0.
1262   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1263     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1264     CC = ISD::SETGE;
1265     return;
1266   }
1267   // Convert X < 1 to 0 >= X.
1268   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1269     RHS = LHS;
1270     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1271     CC = ISD::SETGE;
1272     return;
1273   }
1274 
1275   switch (CC) {
1276   default:
1277     break;
1278   case ISD::SETGT:
1279   case ISD::SETLE:
1280   case ISD::SETUGT:
1281   case ISD::SETULE:
1282     CC = ISD::getSetCCSwappedOperands(CC);
1283     std::swap(LHS, RHS);
1284     break;
1285   }
1286 }
1287 
1288 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1289   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1290   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1291   if (VT.getVectorElementType() == MVT::i1)
1292     KnownSize *= 8;
1293 
1294   switch (KnownSize) {
1295   default:
1296     llvm_unreachable("Invalid LMUL.");
1297   case 8:
1298     return RISCVII::VLMUL::LMUL_F8;
1299   case 16:
1300     return RISCVII::VLMUL::LMUL_F4;
1301   case 32:
1302     return RISCVII::VLMUL::LMUL_F2;
1303   case 64:
1304     return RISCVII::VLMUL::LMUL_1;
1305   case 128:
1306     return RISCVII::VLMUL::LMUL_2;
1307   case 256:
1308     return RISCVII::VLMUL::LMUL_4;
1309   case 512:
1310     return RISCVII::VLMUL::LMUL_8;
1311   }
1312 }
1313 
1314 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1315   switch (LMul) {
1316   default:
1317     llvm_unreachable("Invalid LMUL.");
1318   case RISCVII::VLMUL::LMUL_F8:
1319   case RISCVII::VLMUL::LMUL_F4:
1320   case RISCVII::VLMUL::LMUL_F2:
1321   case RISCVII::VLMUL::LMUL_1:
1322     return RISCV::VRRegClassID;
1323   case RISCVII::VLMUL::LMUL_2:
1324     return RISCV::VRM2RegClassID;
1325   case RISCVII::VLMUL::LMUL_4:
1326     return RISCV::VRM4RegClassID;
1327   case RISCVII::VLMUL::LMUL_8:
1328     return RISCV::VRM8RegClassID;
1329   }
1330 }
1331 
1332 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1333   RISCVII::VLMUL LMUL = getLMUL(VT);
1334   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1335       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1336       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1337       LMUL == RISCVII::VLMUL::LMUL_1) {
1338     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1339                   "Unexpected subreg numbering");
1340     return RISCV::sub_vrm1_0 + Index;
1341   }
1342   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1343     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1344                   "Unexpected subreg numbering");
1345     return RISCV::sub_vrm2_0 + Index;
1346   }
1347   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1348     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1349                   "Unexpected subreg numbering");
1350     return RISCV::sub_vrm4_0 + Index;
1351   }
1352   llvm_unreachable("Invalid vector type.");
1353 }
1354 
1355 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1356   if (VT.getVectorElementType() == MVT::i1)
1357     return RISCV::VRRegClassID;
1358   return getRegClassIDForLMUL(getLMUL(VT));
1359 }
1360 
1361 // Attempt to decompose a subvector insert/extract between VecVT and
1362 // SubVecVT via subregister indices. Returns the subregister index that
1363 // can perform the subvector insert/extract with the given element index, as
1364 // well as the index corresponding to any leftover subvectors that must be
1365 // further inserted/extracted within the register class for SubVecVT.
1366 std::pair<unsigned, unsigned>
1367 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1368     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1369     const RISCVRegisterInfo *TRI) {
1370   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1371                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1372                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1373                 "Register classes not ordered");
1374   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1375   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1376   // Try to compose a subregister index that takes us from the incoming
1377   // LMUL>1 register class down to the outgoing one. At each step we half
1378   // the LMUL:
1379   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1380   // Note that this is not guaranteed to find a subregister index, such as
1381   // when we are extracting from one VR type to another.
1382   unsigned SubRegIdx = RISCV::NoSubRegister;
1383   for (const unsigned RCID :
1384        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1385     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1386       VecVT = VecVT.getHalfNumVectorElementsVT();
1387       bool IsHi =
1388           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1389       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1390                                             getSubregIndexByMVT(VecVT, IsHi));
1391       if (IsHi)
1392         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1393     }
1394   return {SubRegIdx, InsertExtractIdx};
1395 }
1396 
1397 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1398 // stores for those types.
1399 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1400   return !Subtarget.useRVVForFixedLengthVectors() ||
1401          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1402 }
1403 
1404 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1405   if (ScalarTy->isPointerTy())
1406     return true;
1407 
1408   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1409       ScalarTy->isIntegerTy(32))
1410     return true;
1411 
1412   if (ScalarTy->isIntegerTy(64))
1413     return Subtarget.hasVInstructionsI64();
1414 
1415   if (ScalarTy->isHalfTy())
1416     return Subtarget.hasVInstructionsF16();
1417   if (ScalarTy->isFloatTy())
1418     return Subtarget.hasVInstructionsF32();
1419   if (ScalarTy->isDoubleTy())
1420     return Subtarget.hasVInstructionsF64();
1421 
1422   return false;
1423 }
1424 
1425 static SDValue getVLOperand(SDValue Op) {
1426   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1427           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1428          "Unexpected opcode");
1429   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1430   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1431   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1432       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1433   if (!II)
1434     return SDValue();
1435   return Op.getOperand(II->VLOperand + 1 + HasChain);
1436 }
1437 
1438 static bool useRVVForFixedLengthVectorVT(MVT VT,
1439                                          const RISCVSubtarget &Subtarget) {
1440   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1441   if (!Subtarget.useRVVForFixedLengthVectors())
1442     return false;
1443 
1444   // We only support a set of vector types with a consistent maximum fixed size
1445   // across all supported vector element types to avoid legalization issues.
1446   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1447   // fixed-length vector type we support is 1024 bytes.
1448   if (VT.getFixedSizeInBits() > 1024 * 8)
1449     return false;
1450 
1451   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1452 
1453   MVT EltVT = VT.getVectorElementType();
1454 
1455   // Don't use RVV for vectors we cannot scalarize if required.
1456   switch (EltVT.SimpleTy) {
1457   // i1 is supported but has different rules.
1458   default:
1459     return false;
1460   case MVT::i1:
1461     // Masks can only use a single register.
1462     if (VT.getVectorNumElements() > MinVLen)
1463       return false;
1464     MinVLen /= 8;
1465     break;
1466   case MVT::i8:
1467   case MVT::i16:
1468   case MVT::i32:
1469     break;
1470   case MVT::i64:
1471     if (!Subtarget.hasVInstructionsI64())
1472       return false;
1473     break;
1474   case MVT::f16:
1475     if (!Subtarget.hasVInstructionsF16())
1476       return false;
1477     break;
1478   case MVT::f32:
1479     if (!Subtarget.hasVInstructionsF32())
1480       return false;
1481     break;
1482   case MVT::f64:
1483     if (!Subtarget.hasVInstructionsF64())
1484       return false;
1485     break;
1486   }
1487 
1488   // Reject elements larger than ELEN.
1489   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1490     return false;
1491 
1492   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1493   // Don't use RVV for types that don't fit.
1494   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1495     return false;
1496 
1497   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1498   // the base fixed length RVV support in place.
1499   if (!VT.isPow2VectorType())
1500     return false;
1501 
1502   return true;
1503 }
1504 
1505 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1506   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1507 }
1508 
1509 // Return the largest legal scalable vector type that matches VT's element type.
1510 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1511                                             const RISCVSubtarget &Subtarget) {
1512   // This may be called before legal types are setup.
1513   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1514           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1515          "Expected legal fixed length vector!");
1516 
1517   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1518   unsigned MaxELen = Subtarget.getELEN();
1519 
1520   MVT EltVT = VT.getVectorElementType();
1521   switch (EltVT.SimpleTy) {
1522   default:
1523     llvm_unreachable("unexpected element type for RVV container");
1524   case MVT::i1:
1525   case MVT::i8:
1526   case MVT::i16:
1527   case MVT::i32:
1528   case MVT::i64:
1529   case MVT::f16:
1530   case MVT::f32:
1531   case MVT::f64: {
1532     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1533     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1534     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1535     unsigned NumElts =
1536         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1537     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1538     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1539     return MVT::getScalableVectorVT(EltVT, NumElts);
1540   }
1541   }
1542 }
1543 
1544 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1545                                             const RISCVSubtarget &Subtarget) {
1546   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1547                                           Subtarget);
1548 }
1549 
1550 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1551   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1552 }
1553 
1554 // Grow V to consume an entire RVV register.
1555 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1556                                        const RISCVSubtarget &Subtarget) {
1557   assert(VT.isScalableVector() &&
1558          "Expected to convert into a scalable vector!");
1559   assert(V.getValueType().isFixedLengthVector() &&
1560          "Expected a fixed length vector operand!");
1561   SDLoc DL(V);
1562   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1563   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1564 }
1565 
1566 // Shrink V so it's just big enough to maintain a VT's worth of data.
1567 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1568                                          const RISCVSubtarget &Subtarget) {
1569   assert(VT.isFixedLengthVector() &&
1570          "Expected to convert into a fixed length vector!");
1571   assert(V.getValueType().isScalableVector() &&
1572          "Expected a scalable vector operand!");
1573   SDLoc DL(V);
1574   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1575   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1576 }
1577 
1578 /// Return the type of the mask type suitable for masking the provided
1579 /// vector type.  This is simply an i1 element type vector of the same
1580 /// (possibly scalable) length.
1581 static MVT getMaskTypeFor(EVT VecVT) {
1582   assert(VecVT.isVector());
1583   ElementCount EC = VecVT.getVectorElementCount();
1584   return MVT::getVectorVT(MVT::i1, EC);
1585 }
1586 
1587 /// Creates an all ones mask suitable for masking a vector of type VecTy with
1588 /// vector length VL.  .
1589 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
1590                               SelectionDAG &DAG) {
1591   MVT MaskVT = getMaskTypeFor(VecVT);
1592   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1593 }
1594 
1595 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1596 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1597 // the vector type that it is contained in.
1598 static std::pair<SDValue, SDValue>
1599 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1600                 const RISCVSubtarget &Subtarget) {
1601   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1602   MVT XLenVT = Subtarget.getXLenVT();
1603   SDValue VL = VecVT.isFixedLengthVector()
1604                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1605                    : DAG.getRegister(RISCV::X0, XLenVT);
1606   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
1607   return {Mask, VL};
1608 }
1609 
1610 // As above but assuming the given type is a scalable vector type.
1611 static std::pair<SDValue, SDValue>
1612 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1613                         const RISCVSubtarget &Subtarget) {
1614   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1615   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1616 }
1617 
1618 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1619 // of either is (currently) supported. This can get us into an infinite loop
1620 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1621 // as a ..., etc.
1622 // Until either (or both) of these can reliably lower any node, reporting that
1623 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1624 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1625 // which is not desirable.
1626 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1627     EVT VT, unsigned DefinedValues) const {
1628   return false;
1629 }
1630 
1631 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1632                                   const RISCVSubtarget &Subtarget) {
1633   // RISCV FP-to-int conversions saturate to the destination register size, but
1634   // don't produce 0 for nan. We can use a conversion instruction and fix the
1635   // nan case with a compare and a select.
1636   SDValue Src = Op.getOperand(0);
1637 
1638   EVT DstVT = Op.getValueType();
1639   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1640 
1641   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1642   unsigned Opc;
1643   if (SatVT == DstVT)
1644     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1645   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1646     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1647   else
1648     return SDValue();
1649   // FIXME: Support other SatVTs by clamping before or after the conversion.
1650 
1651   SDLoc DL(Op);
1652   SDValue FpToInt = DAG.getNode(
1653       Opc, DL, DstVT, Src,
1654       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1655 
1656   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1657   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1658 }
1659 
1660 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1661 // and back. Taking care to avoid converting values that are nan or already
1662 // correct.
1663 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1664 // have FRM dependencies modeled yet.
1665 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1666   MVT VT = Op.getSimpleValueType();
1667   assert(VT.isVector() && "Unexpected type");
1668 
1669   SDLoc DL(Op);
1670 
1671   // Freeze the source since we are increasing the number of uses.
1672   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1673 
1674   // Truncate to integer and convert back to FP.
1675   MVT IntVT = VT.changeVectorElementTypeToInteger();
1676   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1677   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1678 
1679   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1680 
1681   if (Op.getOpcode() == ISD::FCEIL) {
1682     // If the truncated value is the greater than or equal to the original
1683     // value, we've computed the ceil. Otherwise, we went the wrong way and
1684     // need to increase by 1.
1685     // FIXME: This should use a masked operation. Handle here or in isel?
1686     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1687                                  DAG.getConstantFP(1.0, DL, VT));
1688     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1689     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1690   } else if (Op.getOpcode() == ISD::FFLOOR) {
1691     // If the truncated value is the less than or equal to the original value,
1692     // we've computed the floor. Otherwise, we went the wrong way and need to
1693     // decrease by 1.
1694     // FIXME: This should use a masked operation. Handle here or in isel?
1695     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1696                                  DAG.getConstantFP(1.0, DL, VT));
1697     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1698     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1699   }
1700 
1701   // Restore the original sign so that -0.0 is preserved.
1702   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1703 
1704   // Determine the largest integer that can be represented exactly. This and
1705   // values larger than it don't have any fractional bits so don't need to
1706   // be converted.
1707   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1708   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1709   APFloat MaxVal = APFloat(FltSem);
1710   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1711                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1712   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1713 
1714   // If abs(Src) was larger than MaxVal or nan, keep it.
1715   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1716   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1717   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1718 }
1719 
1720 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1721 // This mode isn't supported in vector hardware on RISCV. But as long as we
1722 // aren't compiling with trapping math, we can emulate this with
1723 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1724 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1725 // dependencies modeled yet.
1726 // FIXME: Use masked operations to avoid final merge.
1727 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1728   MVT VT = Op.getSimpleValueType();
1729   assert(VT.isVector() && "Unexpected type");
1730 
1731   SDLoc DL(Op);
1732 
1733   // Freeze the source since we are increasing the number of uses.
1734   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1735 
1736   // We do the conversion on the absolute value and fix the sign at the end.
1737   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1738 
1739   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1740   bool Ignored;
1741   APFloat Point5Pred = APFloat(0.5f);
1742   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1743   Point5Pred.next(/*nextDown*/ true);
1744 
1745   // Add the adjustment.
1746   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1747                                DAG.getConstantFP(Point5Pred, DL, VT));
1748 
1749   // Truncate to integer and convert back to fp.
1750   MVT IntVT = VT.changeVectorElementTypeToInteger();
1751   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1752   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1753 
1754   // Restore the original sign.
1755   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1756 
1757   // Determine the largest integer that can be represented exactly. This and
1758   // values larger than it don't have any fractional bits so don't need to
1759   // be converted.
1760   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1761   APFloat MaxVal = APFloat(FltSem);
1762   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1763                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1764   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1765 
1766   // If abs(Src) was larger than MaxVal or nan, keep it.
1767   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1768   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1769   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1770 }
1771 
1772 struct VIDSequence {
1773   int64_t StepNumerator;
1774   unsigned StepDenominator;
1775   int64_t Addend;
1776 };
1777 
1778 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1779 // to the (non-zero) step S and start value X. This can be then lowered as the
1780 // RVV sequence (VID * S) + X, for example.
1781 // The step S is represented as an integer numerator divided by a positive
1782 // denominator. Note that the implementation currently only identifies
1783 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1784 // cannot detect 2/3, for example.
1785 // Note that this method will also match potentially unappealing index
1786 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1787 // determine whether this is worth generating code for.
1788 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1789   unsigned NumElts = Op.getNumOperands();
1790   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1791   if (!Op.getValueType().isInteger())
1792     return None;
1793 
1794   Optional<unsigned> SeqStepDenom;
1795   Optional<int64_t> SeqStepNum, SeqAddend;
1796   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1797   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1798   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1799     // Assume undef elements match the sequence; we just have to be careful
1800     // when interpolating across them.
1801     if (Op.getOperand(Idx).isUndef())
1802       continue;
1803     // The BUILD_VECTOR must be all constants.
1804     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1805       return None;
1806 
1807     uint64_t Val = Op.getConstantOperandVal(Idx) &
1808                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1809 
1810     if (PrevElt) {
1811       // Calculate the step since the last non-undef element, and ensure
1812       // it's consistent across the entire sequence.
1813       unsigned IdxDiff = Idx - PrevElt->second;
1814       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1815 
1816       // A zero-value value difference means that we're somewhere in the middle
1817       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1818       // step change before evaluating the sequence.
1819       if (ValDiff == 0)
1820         continue;
1821 
1822       int64_t Remainder = ValDiff % IdxDiff;
1823       // Normalize the step if it's greater than 1.
1824       if (Remainder != ValDiff) {
1825         // The difference must cleanly divide the element span.
1826         if (Remainder != 0)
1827           return None;
1828         ValDiff /= IdxDiff;
1829         IdxDiff = 1;
1830       }
1831 
1832       if (!SeqStepNum)
1833         SeqStepNum = ValDiff;
1834       else if (ValDiff != SeqStepNum)
1835         return None;
1836 
1837       if (!SeqStepDenom)
1838         SeqStepDenom = IdxDiff;
1839       else if (IdxDiff != *SeqStepDenom)
1840         return None;
1841     }
1842 
1843     // Record this non-undef element for later.
1844     if (!PrevElt || PrevElt->first != Val)
1845       PrevElt = std::make_pair(Val, Idx);
1846   }
1847 
1848   // We need to have logged a step for this to count as a legal index sequence.
1849   if (!SeqStepNum || !SeqStepDenom)
1850     return None;
1851 
1852   // Loop back through the sequence and validate elements we might have skipped
1853   // while waiting for a valid step. While doing this, log any sequence addend.
1854   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1855     if (Op.getOperand(Idx).isUndef())
1856       continue;
1857     uint64_t Val = Op.getConstantOperandVal(Idx) &
1858                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1859     uint64_t ExpectedVal =
1860         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1861     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1862     if (!SeqAddend)
1863       SeqAddend = Addend;
1864     else if (Addend != SeqAddend)
1865       return None;
1866   }
1867 
1868   assert(SeqAddend && "Must have an addend if we have a step");
1869 
1870   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1871 }
1872 
1873 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1874 // and lower it as a VRGATHER_VX_VL from the source vector.
1875 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1876                                   SelectionDAG &DAG,
1877                                   const RISCVSubtarget &Subtarget) {
1878   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1879     return SDValue();
1880   SDValue Vec = SplatVal.getOperand(0);
1881   // Only perform this optimization on vectors of the same size for simplicity.
1882   if (Vec.getValueType() != VT)
1883     return SDValue();
1884   SDValue Idx = SplatVal.getOperand(1);
1885   // The index must be a legal type.
1886   if (Idx.getValueType() != Subtarget.getXLenVT())
1887     return SDValue();
1888 
1889   MVT ContainerVT = VT;
1890   if (VT.isFixedLengthVector()) {
1891     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1892     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
1893   }
1894 
1895   SDValue Mask, VL;
1896   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1897 
1898   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
1899                                Idx, Mask, VL);
1900 
1901   if (!VT.isFixedLengthVector())
1902     return Gather;
1903 
1904   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1905 }
1906 
1907 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1908                                  const RISCVSubtarget &Subtarget) {
1909   MVT VT = Op.getSimpleValueType();
1910   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1911 
1912   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1913 
1914   SDLoc DL(Op);
1915   SDValue Mask, VL;
1916   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1917 
1918   MVT XLenVT = Subtarget.getXLenVT();
1919   unsigned NumElts = Op.getNumOperands();
1920 
1921   if (VT.getVectorElementType() == MVT::i1) {
1922     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1923       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1924       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1925     }
1926 
1927     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1928       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1929       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1930     }
1931 
1932     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1933     // scalar integer chunks whose bit-width depends on the number of mask
1934     // bits and XLEN.
1935     // First, determine the most appropriate scalar integer type to use. This
1936     // is at most XLenVT, but may be shrunk to a smaller vector element type
1937     // according to the size of the final vector - use i8 chunks rather than
1938     // XLenVT if we're producing a v8i1. This results in more consistent
1939     // codegen across RV32 and RV64.
1940     unsigned NumViaIntegerBits =
1941         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1942     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
1943     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1944       // If we have to use more than one INSERT_VECTOR_ELT then this
1945       // optimization is likely to increase code size; avoid peforming it in
1946       // such a case. We can use a load from a constant pool in this case.
1947       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1948         return SDValue();
1949       // Now we can create our integer vector type. Note that it may be larger
1950       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1951       MVT IntegerViaVecVT =
1952           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1953                            divideCeil(NumElts, NumViaIntegerBits));
1954 
1955       uint64_t Bits = 0;
1956       unsigned BitPos = 0, IntegerEltIdx = 0;
1957       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1958 
1959       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1960         // Once we accumulate enough bits to fill our scalar type, insert into
1961         // our vector and clear our accumulated data.
1962         if (I != 0 && I % NumViaIntegerBits == 0) {
1963           if (NumViaIntegerBits <= 32)
1964             Bits = SignExtend64(Bits, 32);
1965           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1966           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1967                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1968           Bits = 0;
1969           BitPos = 0;
1970           IntegerEltIdx++;
1971         }
1972         SDValue V = Op.getOperand(I);
1973         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1974         Bits |= ((uint64_t)BitValue << BitPos);
1975       }
1976 
1977       // Insert the (remaining) scalar value into position in our integer
1978       // vector type.
1979       if (NumViaIntegerBits <= 32)
1980         Bits = SignExtend64(Bits, 32);
1981       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1982       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1983                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1984 
1985       if (NumElts < NumViaIntegerBits) {
1986         // If we're producing a smaller vector than our minimum legal integer
1987         // type, bitcast to the equivalent (known-legal) mask type, and extract
1988         // our final mask.
1989         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1990         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1991         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1992                           DAG.getConstant(0, DL, XLenVT));
1993       } else {
1994         // Else we must have produced an integer type with the same size as the
1995         // mask type; bitcast for the final result.
1996         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1997         Vec = DAG.getBitcast(VT, Vec);
1998       }
1999 
2000       return Vec;
2001     }
2002 
2003     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2004     // vector type, we have a legal equivalently-sized i8 type, so we can use
2005     // that.
2006     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2007     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2008 
2009     SDValue WideVec;
2010     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2011       // For a splat, perform a scalar truncate before creating the wider
2012       // vector.
2013       assert(Splat.getValueType() == XLenVT &&
2014              "Unexpected type for i1 splat value");
2015       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2016                           DAG.getConstant(1, DL, XLenVT));
2017       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2018     } else {
2019       SmallVector<SDValue, 8> Ops(Op->op_values());
2020       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2021       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2022       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2023     }
2024 
2025     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2026   }
2027 
2028   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2029     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2030       return Gather;
2031     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2032                                         : RISCVISD::VMV_V_X_VL;
2033     Splat =
2034         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2035     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2036   }
2037 
2038   // Try and match index sequences, which we can lower to the vid instruction
2039   // with optional modifications. An all-undef vector is matched by
2040   // getSplatValue, above.
2041   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2042     int64_t StepNumerator = SimpleVID->StepNumerator;
2043     unsigned StepDenominator = SimpleVID->StepDenominator;
2044     int64_t Addend = SimpleVID->Addend;
2045 
2046     assert(StepNumerator != 0 && "Invalid step");
2047     bool Negate = false;
2048     int64_t SplatStepVal = StepNumerator;
2049     unsigned StepOpcode = ISD::MUL;
2050     if (StepNumerator != 1) {
2051       if (isPowerOf2_64(std::abs(StepNumerator))) {
2052         Negate = StepNumerator < 0;
2053         StepOpcode = ISD::SHL;
2054         SplatStepVal = Log2_64(std::abs(StepNumerator));
2055       }
2056     }
2057 
2058     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2059     // threshold since it's the immediate value many RVV instructions accept.
2060     // There is no vmul.vi instruction so ensure multiply constant can fit in
2061     // a single addi instruction.
2062     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2063          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2064         isPowerOf2_32(StepDenominator) &&
2065         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
2066       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2067       // Convert right out of the scalable type so we can use standard ISD
2068       // nodes for the rest of the computation. If we used scalable types with
2069       // these, we'd lose the fixed-length vector info and generate worse
2070       // vsetvli code.
2071       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2072       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2073           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2074         SDValue SplatStep = DAG.getSplatBuildVector(
2075             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2076         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2077       }
2078       if (StepDenominator != 1) {
2079         SDValue SplatStep = DAG.getSplatBuildVector(
2080             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2081         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2082       }
2083       if (Addend != 0 || Negate) {
2084         SDValue SplatAddend = DAG.getSplatBuildVector(
2085             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2086         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2087       }
2088       return VID;
2089     }
2090   }
2091 
2092   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2093   // when re-interpreted as a vector with a larger element type. For example,
2094   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2095   // could be instead splat as
2096   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2097   // TODO: This optimization could also work on non-constant splats, but it
2098   // would require bit-manipulation instructions to construct the splat value.
2099   SmallVector<SDValue> Sequence;
2100   unsigned EltBitSize = VT.getScalarSizeInBits();
2101   const auto *BV = cast<BuildVectorSDNode>(Op);
2102   if (VT.isInteger() && EltBitSize < 64 &&
2103       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2104       BV->getRepeatedSequence(Sequence) &&
2105       (Sequence.size() * EltBitSize) <= 64) {
2106     unsigned SeqLen = Sequence.size();
2107     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2108     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2109     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2110             ViaIntVT == MVT::i64) &&
2111            "Unexpected sequence type");
2112 
2113     unsigned EltIdx = 0;
2114     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2115     uint64_t SplatValue = 0;
2116     // Construct the amalgamated value which can be splatted as this larger
2117     // vector type.
2118     for (const auto &SeqV : Sequence) {
2119       if (!SeqV.isUndef())
2120         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2121                        << (EltIdx * EltBitSize));
2122       EltIdx++;
2123     }
2124 
2125     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2126     // achieve better constant materializion.
2127     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2128       SplatValue = SignExtend64(SplatValue, 32);
2129 
2130     // Since we can't introduce illegal i64 types at this stage, we can only
2131     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2132     // way we can use RVV instructions to splat.
2133     assert((ViaIntVT.bitsLE(XLenVT) ||
2134             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2135            "Unexpected bitcast sequence");
2136     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2137       SDValue ViaVL =
2138           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2139       MVT ViaContainerVT =
2140           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2141       SDValue Splat =
2142           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2143                       DAG.getUNDEF(ViaContainerVT),
2144                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2145       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2146       return DAG.getBitcast(VT, Splat);
2147     }
2148   }
2149 
2150   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2151   // which constitute a large proportion of the elements. In such cases we can
2152   // splat a vector with the dominant element and make up the shortfall with
2153   // INSERT_VECTOR_ELTs.
2154   // Note that this includes vectors of 2 elements by association. The
2155   // upper-most element is the "dominant" one, allowing us to use a splat to
2156   // "insert" the upper element, and an insert of the lower element at position
2157   // 0, which improves codegen.
2158   SDValue DominantValue;
2159   unsigned MostCommonCount = 0;
2160   DenseMap<SDValue, unsigned> ValueCounts;
2161   unsigned NumUndefElts =
2162       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2163 
2164   // Track the number of scalar loads we know we'd be inserting, estimated as
2165   // any non-zero floating-point constant. Other kinds of element are either
2166   // already in registers or are materialized on demand. The threshold at which
2167   // a vector load is more desirable than several scalar materializion and
2168   // vector-insertion instructions is not known.
2169   unsigned NumScalarLoads = 0;
2170 
2171   for (SDValue V : Op->op_values()) {
2172     if (V.isUndef())
2173       continue;
2174 
2175     ValueCounts.insert(std::make_pair(V, 0));
2176     unsigned &Count = ValueCounts[V];
2177 
2178     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2179       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2180 
2181     // Is this value dominant? In case of a tie, prefer the highest element as
2182     // it's cheaper to insert near the beginning of a vector than it is at the
2183     // end.
2184     if (++Count >= MostCommonCount) {
2185       DominantValue = V;
2186       MostCommonCount = Count;
2187     }
2188   }
2189 
2190   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2191   unsigned NumDefElts = NumElts - NumUndefElts;
2192   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2193 
2194   // Don't perform this optimization when optimizing for size, since
2195   // materializing elements and inserting them tends to cause code bloat.
2196   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2197       ((MostCommonCount > DominantValueCountThreshold) ||
2198        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2199     // Start by splatting the most common element.
2200     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2201 
2202     DenseSet<SDValue> Processed{DominantValue};
2203     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2204     for (const auto &OpIdx : enumerate(Op->ops())) {
2205       const SDValue &V = OpIdx.value();
2206       if (V.isUndef() || !Processed.insert(V).second)
2207         continue;
2208       if (ValueCounts[V] == 1) {
2209         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2210                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2211       } else {
2212         // Blend in all instances of this value using a VSELECT, using a
2213         // mask where each bit signals whether that element is the one
2214         // we're after.
2215         SmallVector<SDValue> Ops;
2216         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2217           return DAG.getConstant(V == V1, DL, XLenVT);
2218         });
2219         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2220                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2221                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2222       }
2223     }
2224 
2225     return Vec;
2226   }
2227 
2228   return SDValue();
2229 }
2230 
2231 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2232                                    SDValue Lo, SDValue Hi, SDValue VL,
2233                                    SelectionDAG &DAG) {
2234   if (!Passthru)
2235     Passthru = DAG.getUNDEF(VT);
2236   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2237     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2238     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2239     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2240     // node in order to try and match RVV vector/scalar instructions.
2241     if ((LoC >> 31) == HiC)
2242       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2243 
2244     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2245     // vmv.v.x whose EEW = 32 to lower it.
2246     auto *Const = dyn_cast<ConstantSDNode>(VL);
2247     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2248       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2249       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2250       // access the subtarget here now.
2251       auto InterVec = DAG.getNode(
2252           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2253                                   DAG.getRegister(RISCV::X0, MVT::i32));
2254       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2255     }
2256   }
2257 
2258   // Fall back to a stack store and stride x0 vector load.
2259   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2260                      Hi, VL);
2261 }
2262 
2263 // Called by type legalization to handle splat of i64 on RV32.
2264 // FIXME: We can optimize this when the type has sign or zero bits in one
2265 // of the halves.
2266 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2267                                    SDValue Scalar, SDValue VL,
2268                                    SelectionDAG &DAG) {
2269   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2270   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2271                            DAG.getConstant(0, DL, MVT::i32));
2272   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2273                            DAG.getConstant(1, DL, MVT::i32));
2274   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2275 }
2276 
2277 // This function lowers a splat of a scalar operand Splat with the vector
2278 // length VL. It ensures the final sequence is type legal, which is useful when
2279 // lowering a splat after type legalization.
2280 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2281                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2282                                 const RISCVSubtarget &Subtarget) {
2283   bool HasPassthru = Passthru && !Passthru.isUndef();
2284   if (!HasPassthru && !Passthru)
2285     Passthru = DAG.getUNDEF(VT);
2286   if (VT.isFloatingPoint()) {
2287     // If VL is 1, we could use vfmv.s.f.
2288     if (isOneConstant(VL))
2289       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2290     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2291   }
2292 
2293   MVT XLenVT = Subtarget.getXLenVT();
2294 
2295   // Simplest case is that the operand needs to be promoted to XLenVT.
2296   if (Scalar.getValueType().bitsLE(XLenVT)) {
2297     // If the operand is a constant, sign extend to increase our chances
2298     // of being able to use a .vi instruction. ANY_EXTEND would become a
2299     // a zero extend and the simm5 check in isel would fail.
2300     // FIXME: Should we ignore the upper bits in isel instead?
2301     unsigned ExtOpc =
2302         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2303     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2304     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2305     // If VL is 1 and the scalar value won't benefit from immediate, we could
2306     // use vmv.s.x.
2307     if (isOneConstant(VL) &&
2308         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2309       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2310     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2311   }
2312 
2313   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2314          "Unexpected scalar for splat lowering!");
2315 
2316   if (isOneConstant(VL) && isNullConstant(Scalar))
2317     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2318                        DAG.getConstant(0, DL, XLenVT), VL);
2319 
2320   // Otherwise use the more complicated splatting algorithm.
2321   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2322 }
2323 
2324 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2325                                 const RISCVSubtarget &Subtarget) {
2326   // We need to be able to widen elements to the next larger integer type.
2327   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2328     return false;
2329 
2330   int Size = Mask.size();
2331   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2332 
2333   int Srcs[] = {-1, -1};
2334   for (int i = 0; i != Size; ++i) {
2335     // Ignore undef elements.
2336     if (Mask[i] < 0)
2337       continue;
2338 
2339     // Is this an even or odd element.
2340     int Pol = i % 2;
2341 
2342     // Ensure we consistently use the same source for this element polarity.
2343     int Src = Mask[i] / Size;
2344     if (Srcs[Pol] < 0)
2345       Srcs[Pol] = Src;
2346     if (Srcs[Pol] != Src)
2347       return false;
2348 
2349     // Make sure the element within the source is appropriate for this element
2350     // in the destination.
2351     int Elt = Mask[i] % Size;
2352     if (Elt != i / 2)
2353       return false;
2354   }
2355 
2356   // We need to find a source for each polarity and they can't be the same.
2357   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2358     return false;
2359 
2360   // Swap the sources if the second source was in the even polarity.
2361   SwapSources = Srcs[0] > Srcs[1];
2362 
2363   return true;
2364 }
2365 
2366 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2367 /// and then extract the original number of elements from the rotated result.
2368 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2369 /// returned rotation amount is for a rotate right, where elements move from
2370 /// higher elements to lower elements. \p LoSrc indicates the first source
2371 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2372 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2373 /// 0 or 1 if a rotation is found.
2374 ///
2375 /// NOTE: We talk about rotate to the right which matches how bit shift and
2376 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2377 /// and the table below write vectors with the lowest elements on the left.
2378 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2379   int Size = Mask.size();
2380 
2381   // We need to detect various ways of spelling a rotation:
2382   //   [11, 12, 13, 14, 15,  0,  1,  2]
2383   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2384   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2385   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2386   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2387   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2388   int Rotation = 0;
2389   LoSrc = -1;
2390   HiSrc = -1;
2391   for (int i = 0; i != Size; ++i) {
2392     int M = Mask[i];
2393     if (M < 0)
2394       continue;
2395 
2396     // Determine where a rotate vector would have started.
2397     int StartIdx = i - (M % Size);
2398     // The identity rotation isn't interesting, stop.
2399     if (StartIdx == 0)
2400       return -1;
2401 
2402     // If we found the tail of a vector the rotation must be the missing
2403     // front. If we found the head of a vector, it must be how much of the
2404     // head.
2405     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2406 
2407     if (Rotation == 0)
2408       Rotation = CandidateRotation;
2409     else if (Rotation != CandidateRotation)
2410       // The rotations don't match, so we can't match this mask.
2411       return -1;
2412 
2413     // Compute which value this mask is pointing at.
2414     int MaskSrc = M < Size ? 0 : 1;
2415 
2416     // Compute which of the two target values this index should be assigned to.
2417     // This reflects whether the high elements are remaining or the low elemnts
2418     // are remaining.
2419     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2420 
2421     // Either set up this value if we've not encountered it before, or check
2422     // that it remains consistent.
2423     if (TargetSrc < 0)
2424       TargetSrc = MaskSrc;
2425     else if (TargetSrc != MaskSrc)
2426       // This may be a rotation, but it pulls from the inputs in some
2427       // unsupported interleaving.
2428       return -1;
2429   }
2430 
2431   // Check that we successfully analyzed the mask, and normalize the results.
2432   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2433   assert((LoSrc >= 0 || HiSrc >= 0) &&
2434          "Failed to find a rotated input vector!");
2435 
2436   return Rotation;
2437 }
2438 
2439 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2440                                    const RISCVSubtarget &Subtarget) {
2441   SDValue V1 = Op.getOperand(0);
2442   SDValue V2 = Op.getOperand(1);
2443   SDLoc DL(Op);
2444   MVT XLenVT = Subtarget.getXLenVT();
2445   MVT VT = Op.getSimpleValueType();
2446   unsigned NumElts = VT.getVectorNumElements();
2447   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2448 
2449   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2450 
2451   SDValue TrueMask, VL;
2452   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2453 
2454   if (SVN->isSplat()) {
2455     const int Lane = SVN->getSplatIndex();
2456     if (Lane >= 0) {
2457       MVT SVT = VT.getVectorElementType();
2458 
2459       // Turn splatted vector load into a strided load with an X0 stride.
2460       SDValue V = V1;
2461       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2462       // with undef.
2463       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2464       int Offset = Lane;
2465       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2466         int OpElements =
2467             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2468         V = V.getOperand(Offset / OpElements);
2469         Offset %= OpElements;
2470       }
2471 
2472       // We need to ensure the load isn't atomic or volatile.
2473       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2474         auto *Ld = cast<LoadSDNode>(V);
2475         Offset *= SVT.getStoreSize();
2476         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2477                                                    TypeSize::Fixed(Offset), DL);
2478 
2479         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2480         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2481           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2482           SDValue IntID =
2483               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2484           SDValue Ops[] = {Ld->getChain(),
2485                            IntID,
2486                            DAG.getUNDEF(ContainerVT),
2487                            NewAddr,
2488                            DAG.getRegister(RISCV::X0, XLenVT),
2489                            VL};
2490           SDValue NewLoad = DAG.getMemIntrinsicNode(
2491               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2492               DAG.getMachineFunction().getMachineMemOperand(
2493                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2494           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2495           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2496         }
2497 
2498         // Otherwise use a scalar load and splat. This will give the best
2499         // opportunity to fold a splat into the operation. ISel can turn it into
2500         // the x0 strided load if we aren't able to fold away the select.
2501         if (SVT.isFloatingPoint())
2502           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2503                           Ld->getPointerInfo().getWithOffset(Offset),
2504                           Ld->getOriginalAlign(),
2505                           Ld->getMemOperand()->getFlags());
2506         else
2507           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2508                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2509                              Ld->getOriginalAlign(),
2510                              Ld->getMemOperand()->getFlags());
2511         DAG.makeEquivalentMemoryOrdering(Ld, V);
2512 
2513         unsigned Opc =
2514             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2515         SDValue Splat =
2516             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2517         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2518       }
2519 
2520       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2521       assert(Lane < (int)NumElts && "Unexpected lane!");
2522       SDValue Gather =
2523           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2524                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2525       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2526     }
2527   }
2528 
2529   ArrayRef<int> Mask = SVN->getMask();
2530 
2531   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2532   // be undef which can be handled with a single SLIDEDOWN/UP.
2533   int LoSrc, HiSrc;
2534   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2535   if (Rotation > 0) {
2536     SDValue LoV, HiV;
2537     if (LoSrc >= 0) {
2538       LoV = LoSrc == 0 ? V1 : V2;
2539       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2540     }
2541     if (HiSrc >= 0) {
2542       HiV = HiSrc == 0 ? V1 : V2;
2543       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2544     }
2545 
2546     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2547     // to slide LoV up by (NumElts - Rotation).
2548     unsigned InvRotate = NumElts - Rotation;
2549 
2550     SDValue Res = DAG.getUNDEF(ContainerVT);
2551     if (HiV) {
2552       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2553       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2554       // causes multiple vsetvlis in some test cases such as lowering
2555       // reduce.mul
2556       SDValue DownVL = VL;
2557       if (LoV)
2558         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2559       Res =
2560           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2561                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2562     }
2563     if (LoV)
2564       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2565                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2566 
2567     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2568   }
2569 
2570   // Detect an interleave shuffle and lower to
2571   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2572   bool SwapSources;
2573   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2574     // Swap sources if needed.
2575     if (SwapSources)
2576       std::swap(V1, V2);
2577 
2578     // Extract the lower half of the vectors.
2579     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2580     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2581                      DAG.getConstant(0, DL, XLenVT));
2582     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2583                      DAG.getConstant(0, DL, XLenVT));
2584 
2585     // Double the element width and halve the number of elements in an int type.
2586     unsigned EltBits = VT.getScalarSizeInBits();
2587     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2588     MVT WideIntVT =
2589         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2590     // Convert this to a scalable vector. We need to base this on the
2591     // destination size to ensure there's always a type with a smaller LMUL.
2592     MVT WideIntContainerVT =
2593         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2594 
2595     // Convert sources to scalable vectors with the same element count as the
2596     // larger type.
2597     MVT HalfContainerVT = MVT::getVectorVT(
2598         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2599     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2600     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2601 
2602     // Cast sources to integer.
2603     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2604     MVT IntHalfVT =
2605         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2606     V1 = DAG.getBitcast(IntHalfVT, V1);
2607     V2 = DAG.getBitcast(IntHalfVT, V2);
2608 
2609     // Freeze V2 since we use it twice and we need to be sure that the add and
2610     // multiply see the same value.
2611     V2 = DAG.getFreeze(V2);
2612 
2613     // Recreate TrueMask using the widened type's element count.
2614     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2615 
2616     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2617     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2618                               V2, TrueMask, VL);
2619     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2620     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2621                                      DAG.getUNDEF(IntHalfVT),
2622                                      DAG.getAllOnesConstant(DL, XLenVT));
2623     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2624                                    V2, Multiplier, TrueMask, VL);
2625     // Add the new copies to our previous addition giving us 2^eltbits copies of
2626     // V2. This is equivalent to shifting V2 left by eltbits. This should
2627     // combine with the vwmulu.vv above to form vwmaccu.vv.
2628     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2629                       TrueMask, VL);
2630     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2631     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2632     // vector VT.
2633     ContainerVT =
2634         MVT::getVectorVT(VT.getVectorElementType(),
2635                          WideIntContainerVT.getVectorElementCount() * 2);
2636     Add = DAG.getBitcast(ContainerVT, Add);
2637     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2638   }
2639 
2640   // Detect shuffles which can be re-expressed as vector selects; these are
2641   // shuffles in which each element in the destination is taken from an element
2642   // at the corresponding index in either source vectors.
2643   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2644     int MaskIndex = MaskIdx.value();
2645     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2646   });
2647 
2648   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2649 
2650   SmallVector<SDValue> MaskVals;
2651   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2652   // merged with a second vrgather.
2653   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2654 
2655   // By default we preserve the original operand order, and use a mask to
2656   // select LHS as true and RHS as false. However, since RVV vector selects may
2657   // feature splats but only on the LHS, we may choose to invert our mask and
2658   // instead select between RHS and LHS.
2659   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2660   bool InvertMask = IsSelect == SwapOps;
2661 
2662   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2663   // half.
2664   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2665 
2666   // Now construct the mask that will be used by the vselect or blended
2667   // vrgather operation. For vrgathers, construct the appropriate indices into
2668   // each vector.
2669   for (int MaskIndex : Mask) {
2670     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2671     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2672     if (!IsSelect) {
2673       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2674       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2675                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2676                                      : DAG.getUNDEF(XLenVT));
2677       GatherIndicesRHS.push_back(
2678           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2679                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2680       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2681         ++LHSIndexCounts[MaskIndex];
2682       if (!IsLHSOrUndefIndex)
2683         ++RHSIndexCounts[MaskIndex - NumElts];
2684     }
2685   }
2686 
2687   if (SwapOps) {
2688     std::swap(V1, V2);
2689     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2690   }
2691 
2692   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2693   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2694   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2695 
2696   if (IsSelect)
2697     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2698 
2699   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2700     // On such a large vector we're unable to use i8 as the index type.
2701     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2702     // may involve vector splitting if we're already at LMUL=8, or our
2703     // user-supplied maximum fixed-length LMUL.
2704     return SDValue();
2705   }
2706 
2707   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2708   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2709   MVT IndexVT = VT.changeTypeToInteger();
2710   // Since we can't introduce illegal index types at this stage, use i16 and
2711   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2712   // than XLenVT.
2713   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2714     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2715     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2716   }
2717 
2718   MVT IndexContainerVT =
2719       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2720 
2721   SDValue Gather;
2722   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2723   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2724   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2725     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2726                               Subtarget);
2727   } else {
2728     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2729     // If only one index is used, we can use a "splat" vrgather.
2730     // TODO: We can splat the most-common index and fix-up any stragglers, if
2731     // that's beneficial.
2732     if (LHSIndexCounts.size() == 1) {
2733       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2734       Gather =
2735           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2736                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2737     } else {
2738       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2739       LHSIndices =
2740           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2741 
2742       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2743                            TrueMask, VL);
2744     }
2745   }
2746 
2747   // If a second vector operand is used by this shuffle, blend it in with an
2748   // additional vrgather.
2749   if (!V2.isUndef()) {
2750     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2751     // If only one index is used, we can use a "splat" vrgather.
2752     // TODO: We can splat the most-common index and fix-up any stragglers, if
2753     // that's beneficial.
2754     if (RHSIndexCounts.size() == 1) {
2755       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2756       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2757                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2758     } else {
2759       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2760       RHSIndices =
2761           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2762       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2763                        VL);
2764     }
2765 
2766     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2767     SelectMask =
2768         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2769 
2770     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2771                          Gather, VL);
2772   }
2773 
2774   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2775 }
2776 
2777 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2778   // Support splats for any type. These should type legalize well.
2779   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2780     return true;
2781 
2782   // Only support legal VTs for other shuffles for now.
2783   if (!isTypeLegal(VT))
2784     return false;
2785 
2786   MVT SVT = VT.getSimpleVT();
2787 
2788   bool SwapSources;
2789   int LoSrc, HiSrc;
2790   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2791          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2792 }
2793 
2794 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2795                                      SDLoc DL, SelectionDAG &DAG,
2796                                      const RISCVSubtarget &Subtarget) {
2797   if (VT.isScalableVector())
2798     return DAG.getFPExtendOrRound(Op, DL, VT);
2799   assert(VT.isFixedLengthVector() &&
2800          "Unexpected value type for RVV FP extend/round lowering");
2801   SDValue Mask, VL;
2802   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2803   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2804                         ? RISCVISD::FP_EXTEND_VL
2805                         : RISCVISD::FP_ROUND_VL;
2806   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2807 }
2808 
2809 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2810 // the exponent.
2811 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2812   MVT VT = Op.getSimpleValueType();
2813   unsigned EltSize = VT.getScalarSizeInBits();
2814   SDValue Src = Op.getOperand(0);
2815   SDLoc DL(Op);
2816 
2817   // We need a FP type that can represent the value.
2818   // TODO: Use f16 for i8 when possible?
2819   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2820   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2821 
2822   // Legal types should have been checked in the RISCVTargetLowering
2823   // constructor.
2824   // TODO: Splitting may make sense in some cases.
2825   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2826          "Expected legal float type!");
2827 
2828   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2829   // The trailing zero count is equal to log2 of this single bit value.
2830   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2831     SDValue Neg =
2832         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2833     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2834   }
2835 
2836   // We have a legal FP type, convert to it.
2837   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2838   // Bitcast to integer and shift the exponent to the LSB.
2839   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2840   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2841   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2842   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2843                               DAG.getConstant(ShiftAmt, DL, IntVT));
2844   // Truncate back to original type to allow vnsrl.
2845   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2846   // The exponent contains log2 of the value in biased form.
2847   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2848 
2849   // For trailing zeros, we just need to subtract the bias.
2850   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2851     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2852                        DAG.getConstant(ExponentBias, DL, VT));
2853 
2854   // For leading zeros, we need to remove the bias and convert from log2 to
2855   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2856   unsigned Adjust = ExponentBias + (EltSize - 1);
2857   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2858 }
2859 
2860 // While RVV has alignment restrictions, we should always be able to load as a
2861 // legal equivalently-sized byte-typed vector instead. This method is
2862 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2863 // the load is already correctly-aligned, it returns SDValue().
2864 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2865                                                     SelectionDAG &DAG) const {
2866   auto *Load = cast<LoadSDNode>(Op);
2867   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2868 
2869   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2870                                      Load->getMemoryVT(),
2871                                      *Load->getMemOperand()))
2872     return SDValue();
2873 
2874   SDLoc DL(Op);
2875   MVT VT = Op.getSimpleValueType();
2876   unsigned EltSizeBits = VT.getScalarSizeInBits();
2877   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2878          "Unexpected unaligned RVV load type");
2879   MVT NewVT =
2880       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2881   assert(NewVT.isValid() &&
2882          "Expecting equally-sized RVV vector types to be legal");
2883   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2884                           Load->getPointerInfo(), Load->getOriginalAlign(),
2885                           Load->getMemOperand()->getFlags());
2886   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2887 }
2888 
2889 // While RVV has alignment restrictions, we should always be able to store as a
2890 // legal equivalently-sized byte-typed vector instead. This method is
2891 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2892 // returns SDValue() if the store is already correctly aligned.
2893 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2894                                                      SelectionDAG &DAG) const {
2895   auto *Store = cast<StoreSDNode>(Op);
2896   assert(Store && Store->getValue().getValueType().isVector() &&
2897          "Expected vector store");
2898 
2899   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2900                                      Store->getMemoryVT(),
2901                                      *Store->getMemOperand()))
2902     return SDValue();
2903 
2904   SDLoc DL(Op);
2905   SDValue StoredVal = Store->getValue();
2906   MVT VT = StoredVal.getSimpleValueType();
2907   unsigned EltSizeBits = VT.getScalarSizeInBits();
2908   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2909          "Unexpected unaligned RVV store type");
2910   MVT NewVT =
2911       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2912   assert(NewVT.isValid() &&
2913          "Expecting equally-sized RVV vector types to be legal");
2914   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2915   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2916                       Store->getPointerInfo(), Store->getOriginalAlign(),
2917                       Store->getMemOperand()->getFlags());
2918 }
2919 
2920 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2921                                             SelectionDAG &DAG) const {
2922   switch (Op.getOpcode()) {
2923   default:
2924     report_fatal_error("unimplemented operand");
2925   case ISD::GlobalAddress:
2926     return lowerGlobalAddress(Op, DAG);
2927   case ISD::BlockAddress:
2928     return lowerBlockAddress(Op, DAG);
2929   case ISD::ConstantPool:
2930     return lowerConstantPool(Op, DAG);
2931   case ISD::JumpTable:
2932     return lowerJumpTable(Op, DAG);
2933   case ISD::GlobalTLSAddress:
2934     return lowerGlobalTLSAddress(Op, DAG);
2935   case ISD::SELECT:
2936     return lowerSELECT(Op, DAG);
2937   case ISD::BRCOND:
2938     return lowerBRCOND(Op, DAG);
2939   case ISD::VASTART:
2940     return lowerVASTART(Op, DAG);
2941   case ISD::FRAMEADDR:
2942     return lowerFRAMEADDR(Op, DAG);
2943   case ISD::RETURNADDR:
2944     return lowerRETURNADDR(Op, DAG);
2945   case ISD::SHL_PARTS:
2946     return lowerShiftLeftParts(Op, DAG);
2947   case ISD::SRA_PARTS:
2948     return lowerShiftRightParts(Op, DAG, true);
2949   case ISD::SRL_PARTS:
2950     return lowerShiftRightParts(Op, DAG, false);
2951   case ISD::BITCAST: {
2952     SDLoc DL(Op);
2953     EVT VT = Op.getValueType();
2954     SDValue Op0 = Op.getOperand(0);
2955     EVT Op0VT = Op0.getValueType();
2956     MVT XLenVT = Subtarget.getXLenVT();
2957     if (VT.isFixedLengthVector()) {
2958       // We can handle fixed length vector bitcasts with a simple replacement
2959       // in isel.
2960       if (Op0VT.isFixedLengthVector())
2961         return Op;
2962       // When bitcasting from scalar to fixed-length vector, insert the scalar
2963       // into a one-element vector of the result type, and perform a vector
2964       // bitcast.
2965       if (!Op0VT.isVector()) {
2966         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2967         if (!isTypeLegal(BVT))
2968           return SDValue();
2969         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2970                                               DAG.getUNDEF(BVT), Op0,
2971                                               DAG.getConstant(0, DL, XLenVT)));
2972       }
2973       return SDValue();
2974     }
2975     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2976     // thus: bitcast the vector to a one-element vector type whose element type
2977     // is the same as the result type, and extract the first element.
2978     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2979       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2980       if (!isTypeLegal(BVT))
2981         return SDValue();
2982       SDValue BVec = DAG.getBitcast(BVT, Op0);
2983       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2984                          DAG.getConstant(0, DL, XLenVT));
2985     }
2986     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2987       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2988       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2989       return FPConv;
2990     }
2991     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2992         Subtarget.hasStdExtF()) {
2993       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2994       SDValue FPConv =
2995           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2996       return FPConv;
2997     }
2998     return SDValue();
2999   }
3000   case ISD::INTRINSIC_WO_CHAIN:
3001     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3002   case ISD::INTRINSIC_W_CHAIN:
3003     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3004   case ISD::INTRINSIC_VOID:
3005     return LowerINTRINSIC_VOID(Op, DAG);
3006   case ISD::BSWAP:
3007   case ISD::BITREVERSE: {
3008     MVT VT = Op.getSimpleValueType();
3009     SDLoc DL(Op);
3010     if (Subtarget.hasStdExtZbp()) {
3011       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3012       // Start with the maximum immediate value which is the bitwidth - 1.
3013       unsigned Imm = VT.getSizeInBits() - 1;
3014       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3015       if (Op.getOpcode() == ISD::BSWAP)
3016         Imm &= ~0x7U;
3017       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3018                          DAG.getConstant(Imm, DL, VT));
3019     }
3020     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3021     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3022     // Expand bitreverse to a bswap(rev8) followed by brev8.
3023     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3024     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3025     // as brev8 by an isel pattern.
3026     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3027                        DAG.getConstant(7, DL, VT));
3028   }
3029   case ISD::FSHL:
3030   case ISD::FSHR: {
3031     MVT VT = Op.getSimpleValueType();
3032     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3033     SDLoc DL(Op);
3034     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3035     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3036     // accidentally setting the extra bit.
3037     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3038     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3039                                 DAG.getConstant(ShAmtWidth, DL, VT));
3040     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3041     // instruction use different orders. fshl will return its first operand for
3042     // shift of zero, fshr will return its second operand. fsl and fsr both
3043     // return rs1 so the ISD nodes need to have different operand orders.
3044     // Shift amount is in rs2.
3045     SDValue Op0 = Op.getOperand(0);
3046     SDValue Op1 = Op.getOperand(1);
3047     unsigned Opc = RISCVISD::FSL;
3048     if (Op.getOpcode() == ISD::FSHR) {
3049       std::swap(Op0, Op1);
3050       Opc = RISCVISD::FSR;
3051     }
3052     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3053   }
3054   case ISD::TRUNCATE:
3055     // Only custom-lower vector truncates
3056     if (!Op.getSimpleValueType().isVector())
3057       return Op;
3058     return lowerVectorTruncLike(Op, DAG);
3059   case ISD::ANY_EXTEND:
3060   case ISD::ZERO_EXTEND:
3061     if (Op.getOperand(0).getValueType().isVector() &&
3062         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3063       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3064     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3065   case ISD::SIGN_EXTEND:
3066     if (Op.getOperand(0).getValueType().isVector() &&
3067         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3068       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3069     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3070   case ISD::SPLAT_VECTOR_PARTS:
3071     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3072   case ISD::INSERT_VECTOR_ELT:
3073     return lowerINSERT_VECTOR_ELT(Op, DAG);
3074   case ISD::EXTRACT_VECTOR_ELT:
3075     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3076   case ISD::VSCALE: {
3077     MVT VT = Op.getSimpleValueType();
3078     SDLoc DL(Op);
3079     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3080     // We define our scalable vector types for lmul=1 to use a 64 bit known
3081     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3082     // vscale as VLENB / 8.
3083     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3084     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3085       report_fatal_error("Support for VLEN==32 is incomplete.");
3086     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3087       // We assume VLENB is a multiple of 8. We manually choose the best shift
3088       // here because SimplifyDemandedBits isn't always able to simplify it.
3089       uint64_t Val = Op.getConstantOperandVal(0);
3090       if (isPowerOf2_64(Val)) {
3091         uint64_t Log2 = Log2_64(Val);
3092         if (Log2 < 3)
3093           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3094                              DAG.getConstant(3 - Log2, DL, VT));
3095         if (Log2 > 3)
3096           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3097                              DAG.getConstant(Log2 - 3, DL, VT));
3098         return VLENB;
3099       }
3100       // If the multiplier is a multiple of 8, scale it down to avoid needing
3101       // to shift the VLENB value.
3102       if ((Val % 8) == 0)
3103         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3104                            DAG.getConstant(Val / 8, DL, VT));
3105     }
3106 
3107     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3108                                  DAG.getConstant(3, DL, VT));
3109     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3110   }
3111   case ISD::FPOWI: {
3112     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3113     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3114     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3115         Op.getOperand(1).getValueType() == MVT::i32) {
3116       SDLoc DL(Op);
3117       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3118       SDValue Powi =
3119           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3120       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3121                          DAG.getIntPtrConstant(0, DL));
3122     }
3123     return SDValue();
3124   }
3125   case ISD::FP_EXTEND: {
3126     // RVV can only do fp_extend to types double the size as the source. We
3127     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3128     // via f32.
3129     SDLoc DL(Op);
3130     MVT VT = Op.getSimpleValueType();
3131     SDValue Src = Op.getOperand(0);
3132     MVT SrcVT = Src.getSimpleValueType();
3133 
3134     // Prepare any fixed-length vector operands.
3135     MVT ContainerVT = VT;
3136     if (SrcVT.isFixedLengthVector()) {
3137       ContainerVT = getContainerForFixedLengthVector(VT);
3138       MVT SrcContainerVT =
3139           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3140       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3141     }
3142 
3143     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3144         SrcVT.getVectorElementType() != MVT::f16) {
3145       // For scalable vectors, we only need to close the gap between
3146       // vXf16->vXf64.
3147       if (!VT.isFixedLengthVector())
3148         return Op;
3149       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3150       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3151       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3152     }
3153 
3154     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3155     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3156     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3157         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3158 
3159     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3160                                            DL, DAG, Subtarget);
3161     if (VT.isFixedLengthVector())
3162       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3163     return Extend;
3164   }
3165   case ISD::FP_ROUND:
3166     if (!Op.getValueType().isVector())
3167       return Op;
3168     return lowerVectorFPRoundLike(Op, DAG);
3169   case ISD::FP_TO_SINT:
3170   case ISD::FP_TO_UINT:
3171   case ISD::SINT_TO_FP:
3172   case ISD::UINT_TO_FP: {
3173     // RVV can only do fp<->int conversions to types half/double the size as
3174     // the source. We custom-lower any conversions that do two hops into
3175     // sequences.
3176     MVT VT = Op.getSimpleValueType();
3177     if (!VT.isVector())
3178       return Op;
3179     SDLoc DL(Op);
3180     SDValue Src = Op.getOperand(0);
3181     MVT EltVT = VT.getVectorElementType();
3182     MVT SrcVT = Src.getSimpleValueType();
3183     MVT SrcEltVT = SrcVT.getVectorElementType();
3184     unsigned EltSize = EltVT.getSizeInBits();
3185     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3186     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3187            "Unexpected vector element types");
3188 
3189     bool IsInt2FP = SrcEltVT.isInteger();
3190     // Widening conversions
3191     if (EltSize > (2 * SrcEltSize)) {
3192       if (IsInt2FP) {
3193         // Do a regular integer sign/zero extension then convert to float.
3194         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3195                                       VT.getVectorElementCount());
3196         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3197                                  ? ISD::ZERO_EXTEND
3198                                  : ISD::SIGN_EXTEND;
3199         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3200         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3201       }
3202       // FP2Int
3203       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3204       // Do one doubling fp_extend then complete the operation by converting
3205       // to int.
3206       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3207       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3208       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3209     }
3210 
3211     // Narrowing conversions
3212     if (SrcEltSize > (2 * EltSize)) {
3213       if (IsInt2FP) {
3214         // One narrowing int_to_fp, then an fp_round.
3215         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3216         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3217         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3218         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3219       }
3220       // FP2Int
3221       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3222       // representable by the integer, the result is poison.
3223       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3224                                     VT.getVectorElementCount());
3225       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3226       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3227     }
3228 
3229     // Scalable vectors can exit here. Patterns will handle equally-sized
3230     // conversions halving/doubling ones.
3231     if (!VT.isFixedLengthVector())
3232       return Op;
3233 
3234     // For fixed-length vectors we lower to a custom "VL" node.
3235     unsigned RVVOpc = 0;
3236     switch (Op.getOpcode()) {
3237     default:
3238       llvm_unreachable("Impossible opcode");
3239     case ISD::FP_TO_SINT:
3240       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3241       break;
3242     case ISD::FP_TO_UINT:
3243       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3244       break;
3245     case ISD::SINT_TO_FP:
3246       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3247       break;
3248     case ISD::UINT_TO_FP:
3249       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3250       break;
3251     }
3252 
3253     MVT ContainerVT, SrcContainerVT;
3254     // Derive the reference container type from the larger vector type.
3255     if (SrcEltSize > EltSize) {
3256       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3257       ContainerVT =
3258           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3259     } else {
3260       ContainerVT = getContainerForFixedLengthVector(VT);
3261       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3262     }
3263 
3264     SDValue Mask, VL;
3265     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3266 
3267     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3268     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3269     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3270   }
3271   case ISD::FP_TO_SINT_SAT:
3272   case ISD::FP_TO_UINT_SAT:
3273     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3274   case ISD::FTRUNC:
3275   case ISD::FCEIL:
3276   case ISD::FFLOOR:
3277     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3278   case ISD::FROUND:
3279     return lowerFROUND(Op, DAG);
3280   case ISD::VECREDUCE_ADD:
3281   case ISD::VECREDUCE_UMAX:
3282   case ISD::VECREDUCE_SMAX:
3283   case ISD::VECREDUCE_UMIN:
3284   case ISD::VECREDUCE_SMIN:
3285     return lowerVECREDUCE(Op, DAG);
3286   case ISD::VECREDUCE_AND:
3287   case ISD::VECREDUCE_OR:
3288   case ISD::VECREDUCE_XOR:
3289     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3290       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3291     return lowerVECREDUCE(Op, DAG);
3292   case ISD::VECREDUCE_FADD:
3293   case ISD::VECREDUCE_SEQ_FADD:
3294   case ISD::VECREDUCE_FMIN:
3295   case ISD::VECREDUCE_FMAX:
3296     return lowerFPVECREDUCE(Op, DAG);
3297   case ISD::VP_REDUCE_ADD:
3298   case ISD::VP_REDUCE_UMAX:
3299   case ISD::VP_REDUCE_SMAX:
3300   case ISD::VP_REDUCE_UMIN:
3301   case ISD::VP_REDUCE_SMIN:
3302   case ISD::VP_REDUCE_FADD:
3303   case ISD::VP_REDUCE_SEQ_FADD:
3304   case ISD::VP_REDUCE_FMIN:
3305   case ISD::VP_REDUCE_FMAX:
3306     return lowerVPREDUCE(Op, DAG);
3307   case ISD::VP_REDUCE_AND:
3308   case ISD::VP_REDUCE_OR:
3309   case ISD::VP_REDUCE_XOR:
3310     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3311       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3312     return lowerVPREDUCE(Op, DAG);
3313   case ISD::INSERT_SUBVECTOR:
3314     return lowerINSERT_SUBVECTOR(Op, DAG);
3315   case ISD::EXTRACT_SUBVECTOR:
3316     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3317   case ISD::STEP_VECTOR:
3318     return lowerSTEP_VECTOR(Op, DAG);
3319   case ISD::VECTOR_REVERSE:
3320     return lowerVECTOR_REVERSE(Op, DAG);
3321   case ISD::VECTOR_SPLICE:
3322     return lowerVECTOR_SPLICE(Op, DAG);
3323   case ISD::BUILD_VECTOR:
3324     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3325   case ISD::SPLAT_VECTOR:
3326     if (Op.getValueType().getVectorElementType() == MVT::i1)
3327       return lowerVectorMaskSplat(Op, DAG);
3328     return SDValue();
3329   case ISD::VECTOR_SHUFFLE:
3330     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3331   case ISD::CONCAT_VECTORS: {
3332     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3333     // better than going through the stack, as the default expansion does.
3334     SDLoc DL(Op);
3335     MVT VT = Op.getSimpleValueType();
3336     unsigned NumOpElts =
3337         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3338     SDValue Vec = DAG.getUNDEF(VT);
3339     for (const auto &OpIdx : enumerate(Op->ops())) {
3340       SDValue SubVec = OpIdx.value();
3341       // Don't insert undef subvectors.
3342       if (SubVec.isUndef())
3343         continue;
3344       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3345                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3346     }
3347     return Vec;
3348   }
3349   case ISD::LOAD:
3350     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3351       return V;
3352     if (Op.getValueType().isFixedLengthVector())
3353       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3354     return Op;
3355   case ISD::STORE:
3356     if (auto V = expandUnalignedRVVStore(Op, DAG))
3357       return V;
3358     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3359       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3360     return Op;
3361   case ISD::MLOAD:
3362   case ISD::VP_LOAD:
3363     return lowerMaskedLoad(Op, DAG);
3364   case ISD::MSTORE:
3365   case ISD::VP_STORE:
3366     return lowerMaskedStore(Op, DAG);
3367   case ISD::SETCC:
3368     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3369   case ISD::ADD:
3370     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3371   case ISD::SUB:
3372     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3373   case ISD::MUL:
3374     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3375   case ISD::MULHS:
3376     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3377   case ISD::MULHU:
3378     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3379   case ISD::AND:
3380     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3381                                               RISCVISD::AND_VL);
3382   case ISD::OR:
3383     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3384                                               RISCVISD::OR_VL);
3385   case ISD::XOR:
3386     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3387                                               RISCVISD::XOR_VL);
3388   case ISD::SDIV:
3389     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3390   case ISD::SREM:
3391     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3392   case ISD::UDIV:
3393     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3394   case ISD::UREM:
3395     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3396   case ISD::SHL:
3397   case ISD::SRA:
3398   case ISD::SRL:
3399     if (Op.getSimpleValueType().isFixedLengthVector())
3400       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3401     // This can be called for an i32 shift amount that needs to be promoted.
3402     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3403            "Unexpected custom legalisation");
3404     return SDValue();
3405   case ISD::SADDSAT:
3406     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3407   case ISD::UADDSAT:
3408     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3409   case ISD::SSUBSAT:
3410     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3411   case ISD::USUBSAT:
3412     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3413   case ISD::FADD:
3414     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3415   case ISD::FSUB:
3416     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3417   case ISD::FMUL:
3418     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3419   case ISD::FDIV:
3420     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3421   case ISD::FNEG:
3422     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3423   case ISD::FABS:
3424     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3425   case ISD::FSQRT:
3426     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3427   case ISD::FMA:
3428     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3429   case ISD::SMIN:
3430     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3431   case ISD::SMAX:
3432     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3433   case ISD::UMIN:
3434     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3435   case ISD::UMAX:
3436     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3437   case ISD::FMINNUM:
3438     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3439   case ISD::FMAXNUM:
3440     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3441   case ISD::ABS:
3442     return lowerABS(Op, DAG);
3443   case ISD::CTLZ_ZERO_UNDEF:
3444   case ISD::CTTZ_ZERO_UNDEF:
3445     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3446   case ISD::VSELECT:
3447     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3448   case ISD::FCOPYSIGN:
3449     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3450   case ISD::MGATHER:
3451   case ISD::VP_GATHER:
3452     return lowerMaskedGather(Op, DAG);
3453   case ISD::MSCATTER:
3454   case ISD::VP_SCATTER:
3455     return lowerMaskedScatter(Op, DAG);
3456   case ISD::FLT_ROUNDS_:
3457     return lowerGET_ROUNDING(Op, DAG);
3458   case ISD::SET_ROUNDING:
3459     return lowerSET_ROUNDING(Op, DAG);
3460   case ISD::VP_SELECT:
3461     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3462   case ISD::VP_MERGE:
3463     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3464   case ISD::VP_ADD:
3465     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3466   case ISD::VP_SUB:
3467     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3468   case ISD::VP_MUL:
3469     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3470   case ISD::VP_SDIV:
3471     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3472   case ISD::VP_UDIV:
3473     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3474   case ISD::VP_SREM:
3475     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3476   case ISD::VP_UREM:
3477     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3478   case ISD::VP_AND:
3479     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3480   case ISD::VP_OR:
3481     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3482   case ISD::VP_XOR:
3483     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3484   case ISD::VP_ASHR:
3485     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3486   case ISD::VP_LSHR:
3487     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3488   case ISD::VP_SHL:
3489     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3490   case ISD::VP_FADD:
3491     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3492   case ISD::VP_FSUB:
3493     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3494   case ISD::VP_FMUL:
3495     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3496   case ISD::VP_FDIV:
3497     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3498   case ISD::VP_FNEG:
3499     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3500   case ISD::VP_FMA:
3501     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3502   case ISD::VP_SEXT:
3503   case ISD::VP_ZEXT:
3504     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3505       return lowerVPExtMaskOp(Op, DAG);
3506     return lowerVPOp(Op, DAG,
3507                      Op.getOpcode() == ISD::VP_SEXT ? RISCVISD::VSEXT_VL
3508                                                     : RISCVISD::VZEXT_VL);
3509   case ISD::VP_TRUNC:
3510     return lowerVectorTruncLike(Op, DAG);
3511   case ISD::VP_FP_ROUND:
3512     return lowerVectorFPRoundLike(Op, DAG);
3513   case ISD::VP_FPTOSI:
3514     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3515   case ISD::VP_FPTOUI:
3516     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3517   case ISD::VP_SITOFP:
3518     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3519   case ISD::VP_UITOFP:
3520     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3521   case ISD::VP_SETCC:
3522     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3523       return lowerVPSetCCMaskOp(Op, DAG);
3524     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3525   }
3526 }
3527 
3528 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3529                              SelectionDAG &DAG, unsigned Flags) {
3530   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3531 }
3532 
3533 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3534                              SelectionDAG &DAG, unsigned Flags) {
3535   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3536                                    Flags);
3537 }
3538 
3539 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3540                              SelectionDAG &DAG, unsigned Flags) {
3541   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3542                                    N->getOffset(), Flags);
3543 }
3544 
3545 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3546                              SelectionDAG &DAG, unsigned Flags) {
3547   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3548 }
3549 
3550 template <class NodeTy>
3551 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3552                                      bool IsLocal) const {
3553   SDLoc DL(N);
3554   EVT Ty = getPointerTy(DAG.getDataLayout());
3555 
3556   if (isPositionIndependent()) {
3557     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3558     if (IsLocal)
3559       // Use PC-relative addressing to access the symbol. This generates the
3560       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3561       // %pcrel_lo(auipc)).
3562       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3563 
3564     // Use PC-relative addressing to access the GOT for this symbol, then load
3565     // the address from the GOT. This generates the pattern (PseudoLA sym),
3566     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3567     SDValue Load =
3568         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3569     MachineFunction &MF = DAG.getMachineFunction();
3570     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3571         MachinePointerInfo::getGOT(MF),
3572         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3573             MachineMemOperand::MOInvariant,
3574         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3575     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3576     return Load;
3577   }
3578 
3579   switch (getTargetMachine().getCodeModel()) {
3580   default:
3581     report_fatal_error("Unsupported code model for lowering");
3582   case CodeModel::Small: {
3583     // Generate a sequence for accessing addresses within the first 2 GiB of
3584     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3585     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3586     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3587     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3588     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3589   }
3590   case CodeModel::Medium: {
3591     // Generate a sequence for accessing addresses within any 2GiB range within
3592     // the address space. This generates the pattern (PseudoLLA sym), which
3593     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3594     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3595     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3596   }
3597   }
3598 }
3599 
3600 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3601     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3602 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3603     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3604 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3605     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3606 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3607     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3608 
3609 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3610                                                 SelectionDAG &DAG) const {
3611   SDLoc DL(Op);
3612   EVT Ty = Op.getValueType();
3613   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3614   int64_t Offset = N->getOffset();
3615   MVT XLenVT = Subtarget.getXLenVT();
3616 
3617   const GlobalValue *GV = N->getGlobal();
3618   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3619   SDValue Addr = getAddr(N, DAG, IsLocal);
3620 
3621   // In order to maximise the opportunity for common subexpression elimination,
3622   // emit a separate ADD node for the global address offset instead of folding
3623   // it in the global address node. Later peephole optimisations may choose to
3624   // fold it back in when profitable.
3625   if (Offset != 0)
3626     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3627                        DAG.getConstant(Offset, DL, XLenVT));
3628   return Addr;
3629 }
3630 
3631 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3632                                                SelectionDAG &DAG) const {
3633   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3634 
3635   return getAddr(N, DAG);
3636 }
3637 
3638 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3639                                                SelectionDAG &DAG) const {
3640   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3641 
3642   return getAddr(N, DAG);
3643 }
3644 
3645 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3646                                             SelectionDAG &DAG) const {
3647   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3648 
3649   return getAddr(N, DAG);
3650 }
3651 
3652 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3653                                               SelectionDAG &DAG,
3654                                               bool UseGOT) const {
3655   SDLoc DL(N);
3656   EVT Ty = getPointerTy(DAG.getDataLayout());
3657   const GlobalValue *GV = N->getGlobal();
3658   MVT XLenVT = Subtarget.getXLenVT();
3659 
3660   if (UseGOT) {
3661     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3662     // load the address from the GOT and add the thread pointer. This generates
3663     // the pattern (PseudoLA_TLS_IE sym), which expands to
3664     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3665     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3666     SDValue Load =
3667         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3668     MachineFunction &MF = DAG.getMachineFunction();
3669     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3670         MachinePointerInfo::getGOT(MF),
3671         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3672             MachineMemOperand::MOInvariant,
3673         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3674     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3675 
3676     // Add the thread pointer.
3677     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3678     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3679   }
3680 
3681   // Generate a sequence for accessing the address relative to the thread
3682   // pointer, with the appropriate adjustment for the thread pointer offset.
3683   // This generates the pattern
3684   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3685   SDValue AddrHi =
3686       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3687   SDValue AddrAdd =
3688       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3689   SDValue AddrLo =
3690       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3691 
3692   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3693   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3694   SDValue MNAdd = SDValue(
3695       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3696       0);
3697   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3698 }
3699 
3700 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3701                                                SelectionDAG &DAG) const {
3702   SDLoc DL(N);
3703   EVT Ty = getPointerTy(DAG.getDataLayout());
3704   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3705   const GlobalValue *GV = N->getGlobal();
3706 
3707   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3708   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3709   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3710   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3711   SDValue Load =
3712       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3713 
3714   // Prepare argument list to generate call.
3715   ArgListTy Args;
3716   ArgListEntry Entry;
3717   Entry.Node = Load;
3718   Entry.Ty = CallTy;
3719   Args.push_back(Entry);
3720 
3721   // Setup call to __tls_get_addr.
3722   TargetLowering::CallLoweringInfo CLI(DAG);
3723   CLI.setDebugLoc(DL)
3724       .setChain(DAG.getEntryNode())
3725       .setLibCallee(CallingConv::C, CallTy,
3726                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3727                     std::move(Args));
3728 
3729   return LowerCallTo(CLI).first;
3730 }
3731 
3732 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3733                                                    SelectionDAG &DAG) const {
3734   SDLoc DL(Op);
3735   EVT Ty = Op.getValueType();
3736   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3737   int64_t Offset = N->getOffset();
3738   MVT XLenVT = Subtarget.getXLenVT();
3739 
3740   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3741 
3742   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3743       CallingConv::GHC)
3744     report_fatal_error("In GHC calling convention TLS is not supported");
3745 
3746   SDValue Addr;
3747   switch (Model) {
3748   case TLSModel::LocalExec:
3749     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3750     break;
3751   case TLSModel::InitialExec:
3752     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3753     break;
3754   case TLSModel::LocalDynamic:
3755   case TLSModel::GeneralDynamic:
3756     Addr = getDynamicTLSAddr(N, DAG);
3757     break;
3758   }
3759 
3760   // In order to maximise the opportunity for common subexpression elimination,
3761   // emit a separate ADD node for the global address offset instead of folding
3762   // it in the global address node. Later peephole optimisations may choose to
3763   // fold it back in when profitable.
3764   if (Offset != 0)
3765     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3766                        DAG.getConstant(Offset, DL, XLenVT));
3767   return Addr;
3768 }
3769 
3770 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3771   SDValue CondV = Op.getOperand(0);
3772   SDValue TrueV = Op.getOperand(1);
3773   SDValue FalseV = Op.getOperand(2);
3774   SDLoc DL(Op);
3775   MVT VT = Op.getSimpleValueType();
3776   MVT XLenVT = Subtarget.getXLenVT();
3777 
3778   // Lower vector SELECTs to VSELECTs by splatting the condition.
3779   if (VT.isVector()) {
3780     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3781     SDValue CondSplat = VT.isScalableVector()
3782                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3783                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3784     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3785   }
3786 
3787   // If the result type is XLenVT and CondV is the output of a SETCC node
3788   // which also operated on XLenVT inputs, then merge the SETCC node into the
3789   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3790   // compare+branch instructions. i.e.:
3791   // (select (setcc lhs, rhs, cc), truev, falsev)
3792   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3793   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3794       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3795     SDValue LHS = CondV.getOperand(0);
3796     SDValue RHS = CondV.getOperand(1);
3797     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3798     ISD::CondCode CCVal = CC->get();
3799 
3800     // Special case for a select of 2 constants that have a diffence of 1.
3801     // Normally this is done by DAGCombine, but if the select is introduced by
3802     // type legalization or op legalization, we miss it. Restricting to SETLT
3803     // case for now because that is what signed saturating add/sub need.
3804     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3805     // but we would probably want to swap the true/false values if the condition
3806     // is SETGE/SETLE to avoid an XORI.
3807     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3808         CCVal == ISD::SETLT) {
3809       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3810       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3811       if (TrueVal - 1 == FalseVal)
3812         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3813       if (TrueVal + 1 == FalseVal)
3814         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3815     }
3816 
3817     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3818 
3819     SDValue TargetCC = DAG.getCondCode(CCVal);
3820     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3821     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3822   }
3823 
3824   // Otherwise:
3825   // (select condv, truev, falsev)
3826   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3827   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3828   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3829 
3830   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3831 
3832   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3833 }
3834 
3835 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3836   SDValue CondV = Op.getOperand(1);
3837   SDLoc DL(Op);
3838   MVT XLenVT = Subtarget.getXLenVT();
3839 
3840   if (CondV.getOpcode() == ISD::SETCC &&
3841       CondV.getOperand(0).getValueType() == XLenVT) {
3842     SDValue LHS = CondV.getOperand(0);
3843     SDValue RHS = CondV.getOperand(1);
3844     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3845 
3846     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3847 
3848     SDValue TargetCC = DAG.getCondCode(CCVal);
3849     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3850                        LHS, RHS, TargetCC, Op.getOperand(2));
3851   }
3852 
3853   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3854                      CondV, DAG.getConstant(0, DL, XLenVT),
3855                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3856 }
3857 
3858 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3859   MachineFunction &MF = DAG.getMachineFunction();
3860   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3861 
3862   SDLoc DL(Op);
3863   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3864                                  getPointerTy(MF.getDataLayout()));
3865 
3866   // vastart just stores the address of the VarArgsFrameIndex slot into the
3867   // memory location argument.
3868   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3869   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3870                       MachinePointerInfo(SV));
3871 }
3872 
3873 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3874                                             SelectionDAG &DAG) const {
3875   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3876   MachineFunction &MF = DAG.getMachineFunction();
3877   MachineFrameInfo &MFI = MF.getFrameInfo();
3878   MFI.setFrameAddressIsTaken(true);
3879   Register FrameReg = RI.getFrameRegister(MF);
3880   int XLenInBytes = Subtarget.getXLen() / 8;
3881 
3882   EVT VT = Op.getValueType();
3883   SDLoc DL(Op);
3884   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3885   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3886   while (Depth--) {
3887     int Offset = -(XLenInBytes * 2);
3888     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3889                               DAG.getIntPtrConstant(Offset, DL));
3890     FrameAddr =
3891         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3892   }
3893   return FrameAddr;
3894 }
3895 
3896 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3897                                              SelectionDAG &DAG) const {
3898   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3899   MachineFunction &MF = DAG.getMachineFunction();
3900   MachineFrameInfo &MFI = MF.getFrameInfo();
3901   MFI.setReturnAddressIsTaken(true);
3902   MVT XLenVT = Subtarget.getXLenVT();
3903   int XLenInBytes = Subtarget.getXLen() / 8;
3904 
3905   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3906     return SDValue();
3907 
3908   EVT VT = Op.getValueType();
3909   SDLoc DL(Op);
3910   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3911   if (Depth) {
3912     int Off = -XLenInBytes;
3913     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3914     SDValue Offset = DAG.getConstant(Off, DL, VT);
3915     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3916                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3917                        MachinePointerInfo());
3918   }
3919 
3920   // Return the value of the return address register, marking it an implicit
3921   // live-in.
3922   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3923   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3924 }
3925 
3926 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3927                                                  SelectionDAG &DAG) const {
3928   SDLoc DL(Op);
3929   SDValue Lo = Op.getOperand(0);
3930   SDValue Hi = Op.getOperand(1);
3931   SDValue Shamt = Op.getOperand(2);
3932   EVT VT = Lo.getValueType();
3933 
3934   // if Shamt-XLEN < 0: // Shamt < XLEN
3935   //   Lo = Lo << Shamt
3936   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3937   // else:
3938   //   Lo = 0
3939   //   Hi = Lo << (Shamt-XLEN)
3940 
3941   SDValue Zero = DAG.getConstant(0, DL, VT);
3942   SDValue One = DAG.getConstant(1, DL, VT);
3943   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3944   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3945   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3946   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3947 
3948   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3949   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3950   SDValue ShiftRightLo =
3951       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3952   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3953   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3954   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3955 
3956   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3957 
3958   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3959   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3960 
3961   SDValue Parts[2] = {Lo, Hi};
3962   return DAG.getMergeValues(Parts, DL);
3963 }
3964 
3965 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3966                                                   bool IsSRA) const {
3967   SDLoc DL(Op);
3968   SDValue Lo = Op.getOperand(0);
3969   SDValue Hi = Op.getOperand(1);
3970   SDValue Shamt = Op.getOperand(2);
3971   EVT VT = Lo.getValueType();
3972 
3973   // SRA expansion:
3974   //   if Shamt-XLEN < 0: // Shamt < XLEN
3975   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3976   //     Hi = Hi >>s Shamt
3977   //   else:
3978   //     Lo = Hi >>s (Shamt-XLEN);
3979   //     Hi = Hi >>s (XLEN-1)
3980   //
3981   // SRL expansion:
3982   //   if Shamt-XLEN < 0: // Shamt < XLEN
3983   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3984   //     Hi = Hi >>u Shamt
3985   //   else:
3986   //     Lo = Hi >>u (Shamt-XLEN);
3987   //     Hi = 0;
3988 
3989   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3990 
3991   SDValue Zero = DAG.getConstant(0, DL, VT);
3992   SDValue One = DAG.getConstant(1, DL, VT);
3993   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3994   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3995   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3996   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3997 
3998   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3999   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4000   SDValue ShiftLeftHi =
4001       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4002   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4003   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4004   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4005   SDValue HiFalse =
4006       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4007 
4008   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4009 
4010   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4011   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4012 
4013   SDValue Parts[2] = {Lo, Hi};
4014   return DAG.getMergeValues(Parts, DL);
4015 }
4016 
4017 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4018 // legal equivalently-sized i8 type, so we can use that as a go-between.
4019 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4020                                                   SelectionDAG &DAG) const {
4021   SDLoc DL(Op);
4022   MVT VT = Op.getSimpleValueType();
4023   SDValue SplatVal = Op.getOperand(0);
4024   // All-zeros or all-ones splats are handled specially.
4025   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4026     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4027     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4028   }
4029   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4030     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4031     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4032   }
4033   MVT XLenVT = Subtarget.getXLenVT();
4034   assert(SplatVal.getValueType() == XLenVT &&
4035          "Unexpected type for i1 splat value");
4036   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4037   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4038                          DAG.getConstant(1, DL, XLenVT));
4039   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4040   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4041   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4042 }
4043 
4044 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4045 // illegal (currently only vXi64 RV32).
4046 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4047 // them to VMV_V_X_VL.
4048 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4049                                                      SelectionDAG &DAG) const {
4050   SDLoc DL(Op);
4051   MVT VecVT = Op.getSimpleValueType();
4052   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4053          "Unexpected SPLAT_VECTOR_PARTS lowering");
4054 
4055   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4056   SDValue Lo = Op.getOperand(0);
4057   SDValue Hi = Op.getOperand(1);
4058 
4059   if (VecVT.isFixedLengthVector()) {
4060     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4061     SDLoc DL(Op);
4062     SDValue Mask, VL;
4063     std::tie(Mask, VL) =
4064         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4065 
4066     SDValue Res =
4067         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4068     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4069   }
4070 
4071   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4072     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4073     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4074     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4075     // node in order to try and match RVV vector/scalar instructions.
4076     if ((LoC >> 31) == HiC)
4077       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4078                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4079   }
4080 
4081   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4082   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4083       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4084       Hi.getConstantOperandVal(1) == 31)
4085     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4086                        DAG.getRegister(RISCV::X0, MVT::i32));
4087 
4088   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4089   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4090                      DAG.getUNDEF(VecVT), Lo, Hi,
4091                      DAG.getRegister(RISCV::X0, MVT::i32));
4092 }
4093 
4094 // Custom-lower extensions from mask vectors by using a vselect either with 1
4095 // for zero/any-extension or -1 for sign-extension:
4096 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4097 // Note that any-extension is lowered identically to zero-extension.
4098 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4099                                                 int64_t ExtTrueVal) const {
4100   SDLoc DL(Op);
4101   MVT VecVT = Op.getSimpleValueType();
4102   SDValue Src = Op.getOperand(0);
4103   // Only custom-lower extensions from mask types
4104   assert(Src.getValueType().isVector() &&
4105          Src.getValueType().getVectorElementType() == MVT::i1);
4106 
4107   if (VecVT.isScalableVector()) {
4108     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4109     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4110     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4111   }
4112 
4113   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4114   MVT I1ContainerVT =
4115       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4116 
4117   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4118 
4119   SDValue Mask, VL;
4120   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4121 
4122   MVT XLenVT = Subtarget.getXLenVT();
4123   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4124   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4125 
4126   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4127                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4128   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4129                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4130   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4131                                SplatTrueVal, SplatZero, VL);
4132 
4133   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4134 }
4135 
4136 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4137     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4138   MVT ExtVT = Op.getSimpleValueType();
4139   // Only custom-lower extensions from fixed-length vector types.
4140   if (!ExtVT.isFixedLengthVector())
4141     return Op;
4142   MVT VT = Op.getOperand(0).getSimpleValueType();
4143   // Grab the canonical container type for the extended type. Infer the smaller
4144   // type from that to ensure the same number of vector elements, as we know
4145   // the LMUL will be sufficient to hold the smaller type.
4146   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4147   // Get the extended container type manually to ensure the same number of
4148   // vector elements between source and dest.
4149   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4150                                      ContainerExtVT.getVectorElementCount());
4151 
4152   SDValue Op1 =
4153       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4154 
4155   SDLoc DL(Op);
4156   SDValue Mask, VL;
4157   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4158 
4159   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4160 
4161   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4162 }
4163 
4164 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4165 // setcc operation:
4166 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4167 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4168                                                       SelectionDAG &DAG) const {
4169   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNC;
4170   SDLoc DL(Op);
4171   EVT MaskVT = Op.getValueType();
4172   // Only expect to custom-lower truncations to mask types
4173   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4174          "Unexpected type for vector mask lowering");
4175   SDValue Src = Op.getOperand(0);
4176   MVT VecVT = Src.getSimpleValueType();
4177   SDValue Mask, VL;
4178   if (IsVPTrunc) {
4179     Mask = Op.getOperand(1);
4180     VL = Op.getOperand(2);
4181   }
4182   // If this is a fixed vector, we need to convert it to a scalable vector.
4183   MVT ContainerVT = VecVT;
4184 
4185   if (VecVT.isFixedLengthVector()) {
4186     ContainerVT = getContainerForFixedLengthVector(VecVT);
4187     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4188     if (IsVPTrunc) {
4189       MVT MaskContainerVT =
4190           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4191       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4192     }
4193   }
4194 
4195   if (!IsVPTrunc) {
4196     std::tie(Mask, VL) =
4197         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4198   }
4199 
4200   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4201   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4202 
4203   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4204                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4205   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4206                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4207 
4208   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4209   SDValue Trunc =
4210       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4211   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4212                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4213   if (MaskVT.isFixedLengthVector())
4214     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4215   return Trunc;
4216 }
4217 
4218 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4219                                                   SelectionDAG &DAG) const {
4220   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNC;
4221   SDLoc DL(Op);
4222 
4223   MVT VT = Op.getSimpleValueType();
4224   // Only custom-lower vector truncates
4225   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4226 
4227   // Truncates to mask types are handled differently
4228   if (VT.getVectorElementType() == MVT::i1)
4229     return lowerVectorMaskTruncLike(Op, DAG);
4230 
4231   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4232   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4233   // truncate by one power of two at a time.
4234   MVT DstEltVT = VT.getVectorElementType();
4235 
4236   SDValue Src = Op.getOperand(0);
4237   MVT SrcVT = Src.getSimpleValueType();
4238   MVT SrcEltVT = SrcVT.getVectorElementType();
4239 
4240   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4241          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4242          "Unexpected vector truncate lowering");
4243 
4244   MVT ContainerVT = SrcVT;
4245   SDValue Mask, VL;
4246   if (IsVPTrunc) {
4247     Mask = Op.getOperand(1);
4248     VL = Op.getOperand(2);
4249   }
4250   if (SrcVT.isFixedLengthVector()) {
4251     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4252     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4253     if (IsVPTrunc) {
4254       MVT MaskVT = getMaskTypeFor(ContainerVT);
4255       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4256     }
4257   }
4258 
4259   SDValue Result = Src;
4260   if (!IsVPTrunc) {
4261     std::tie(Mask, VL) =
4262         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4263   }
4264 
4265   LLVMContext &Context = *DAG.getContext();
4266   const ElementCount Count = ContainerVT.getVectorElementCount();
4267   do {
4268     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4269     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4270     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4271                          Mask, VL);
4272   } while (SrcEltVT != DstEltVT);
4273 
4274   if (SrcVT.isFixedLengthVector())
4275     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4276 
4277   return Result;
4278 }
4279 
4280 SDValue RISCVTargetLowering::lowerVectorFPRoundLike(SDValue Op,
4281                                                     SelectionDAG &DAG) const {
4282   bool IsVPFPTrunc = Op.getOpcode() == ISD::VP_FP_ROUND;
4283   // RVV can only do truncate fp to types half the size as the source. We
4284   // custom-lower f64->f16 rounds via RVV's round-to-odd float
4285   // conversion instruction.
4286   SDLoc DL(Op);
4287   MVT VT = Op.getSimpleValueType();
4288 
4289   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4290 
4291   SDValue Src = Op.getOperand(0);
4292   MVT SrcVT = Src.getSimpleValueType();
4293 
4294   bool IsDirectConv = VT.getVectorElementType() != MVT::f16 ||
4295                       SrcVT.getVectorElementType() != MVT::f64;
4296 
4297   // For FP_ROUND of scalable vectors, leave it to the pattern.
4298   if (!VT.isFixedLengthVector() && !IsVPFPTrunc && IsDirectConv)
4299     return Op;
4300 
4301   // Prepare any fixed-length vector operands.
4302   MVT ContainerVT = VT;
4303   SDValue Mask, VL;
4304   if (IsVPFPTrunc) {
4305     Mask = Op.getOperand(1);
4306     VL = Op.getOperand(2);
4307   }
4308   if (VT.isFixedLengthVector()) {
4309     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4310     ContainerVT =
4311         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4312     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4313     if (IsVPFPTrunc) {
4314       MVT MaskVT = getMaskTypeFor(ContainerVT);
4315       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4316     }
4317   }
4318 
4319   if (!IsVPFPTrunc)
4320     std::tie(Mask, VL) =
4321         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4322 
4323   if (IsDirectConv) {
4324     Src = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, ContainerVT, Src, Mask, VL);
4325     if (VT.isFixedLengthVector())
4326       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4327     return Src;
4328   }
4329 
4330   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4331   SDValue IntermediateRound =
4332       DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
4333   SDValue Round = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, ContainerVT,
4334                               IntermediateRound, Mask, VL);
4335   if (VT.isFixedLengthVector())
4336     return convertFromScalableVector(VT, Round, DAG, Subtarget);
4337   return Round;
4338 }
4339 
4340 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4341 // first position of a vector, and that vector is slid up to the insert index.
4342 // By limiting the active vector length to index+1 and merging with the
4343 // original vector (with an undisturbed tail policy for elements >= VL), we
4344 // achieve the desired result of leaving all elements untouched except the one
4345 // at VL-1, which is replaced with the desired value.
4346 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4347                                                     SelectionDAG &DAG) const {
4348   SDLoc DL(Op);
4349   MVT VecVT = Op.getSimpleValueType();
4350   SDValue Vec = Op.getOperand(0);
4351   SDValue Val = Op.getOperand(1);
4352   SDValue Idx = Op.getOperand(2);
4353 
4354   if (VecVT.getVectorElementType() == MVT::i1) {
4355     // FIXME: For now we just promote to an i8 vector and insert into that,
4356     // but this is probably not optimal.
4357     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4358     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4359     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4360     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4361   }
4362 
4363   MVT ContainerVT = VecVT;
4364   // If the operand is a fixed-length vector, convert to a scalable one.
4365   if (VecVT.isFixedLengthVector()) {
4366     ContainerVT = getContainerForFixedLengthVector(VecVT);
4367     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4368   }
4369 
4370   MVT XLenVT = Subtarget.getXLenVT();
4371 
4372   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4373   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4374   // Even i64-element vectors on RV32 can be lowered without scalar
4375   // legalization if the most-significant 32 bits of the value are not affected
4376   // by the sign-extension of the lower 32 bits.
4377   // TODO: We could also catch sign extensions of a 32-bit value.
4378   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4379     const auto *CVal = cast<ConstantSDNode>(Val);
4380     if (isInt<32>(CVal->getSExtValue())) {
4381       IsLegalInsert = true;
4382       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4383     }
4384   }
4385 
4386   SDValue Mask, VL;
4387   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4388 
4389   SDValue ValInVec;
4390 
4391   if (IsLegalInsert) {
4392     unsigned Opc =
4393         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4394     if (isNullConstant(Idx)) {
4395       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4396       if (!VecVT.isFixedLengthVector())
4397         return Vec;
4398       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4399     }
4400     ValInVec =
4401         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4402   } else {
4403     // On RV32, i64-element vectors must be specially handled to place the
4404     // value at element 0, by using two vslide1up instructions in sequence on
4405     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4406     // this.
4407     SDValue One = DAG.getConstant(1, DL, XLenVT);
4408     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4409     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4410     MVT I32ContainerVT =
4411         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4412     SDValue I32Mask =
4413         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4414     // Limit the active VL to two.
4415     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4416     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4417     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4418     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4419                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4420     // First slide in the hi value, then the lo in underneath it.
4421     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4422                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4423                            I32Mask, InsertI64VL);
4424     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4425                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4426                            I32Mask, InsertI64VL);
4427     // Bitcast back to the right container type.
4428     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4429   }
4430 
4431   // Now that the value is in a vector, slide it into position.
4432   SDValue InsertVL =
4433       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4434   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4435                                 ValInVec, Idx, Mask, InsertVL);
4436   if (!VecVT.isFixedLengthVector())
4437     return Slideup;
4438   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4439 }
4440 
4441 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4442 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4443 // types this is done using VMV_X_S to allow us to glean information about the
4444 // sign bits of the result.
4445 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4446                                                      SelectionDAG &DAG) const {
4447   SDLoc DL(Op);
4448   SDValue Idx = Op.getOperand(1);
4449   SDValue Vec = Op.getOperand(0);
4450   EVT EltVT = Op.getValueType();
4451   MVT VecVT = Vec.getSimpleValueType();
4452   MVT XLenVT = Subtarget.getXLenVT();
4453 
4454   if (VecVT.getVectorElementType() == MVT::i1) {
4455     if (VecVT.isFixedLengthVector()) {
4456       unsigned NumElts = VecVT.getVectorNumElements();
4457       if (NumElts >= 8) {
4458         MVT WideEltVT;
4459         unsigned WidenVecLen;
4460         SDValue ExtractElementIdx;
4461         SDValue ExtractBitIdx;
4462         unsigned MaxEEW = Subtarget.getELEN();
4463         MVT LargestEltVT = MVT::getIntegerVT(
4464             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4465         if (NumElts <= LargestEltVT.getSizeInBits()) {
4466           assert(isPowerOf2_32(NumElts) &&
4467                  "the number of elements should be power of 2");
4468           WideEltVT = MVT::getIntegerVT(NumElts);
4469           WidenVecLen = 1;
4470           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4471           ExtractBitIdx = Idx;
4472         } else {
4473           WideEltVT = LargestEltVT;
4474           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4475           // extract element index = index / element width
4476           ExtractElementIdx = DAG.getNode(
4477               ISD::SRL, DL, XLenVT, Idx,
4478               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4479           // mask bit index = index % element width
4480           ExtractBitIdx = DAG.getNode(
4481               ISD::AND, DL, XLenVT, Idx,
4482               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4483         }
4484         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4485         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4486         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4487                                          Vec, ExtractElementIdx);
4488         // Extract the bit from GPR.
4489         SDValue ShiftRight =
4490             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4491         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4492                            DAG.getConstant(1, DL, XLenVT));
4493       }
4494     }
4495     // Otherwise, promote to an i8 vector and extract from that.
4496     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4497     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4498     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4499   }
4500 
4501   // If this is a fixed vector, we need to convert it to a scalable vector.
4502   MVT ContainerVT = VecVT;
4503   if (VecVT.isFixedLengthVector()) {
4504     ContainerVT = getContainerForFixedLengthVector(VecVT);
4505     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4506   }
4507 
4508   // If the index is 0, the vector is already in the right position.
4509   if (!isNullConstant(Idx)) {
4510     // Use a VL of 1 to avoid processing more elements than we need.
4511     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4512     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4513     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4514                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4515   }
4516 
4517   if (!EltVT.isInteger()) {
4518     // Floating-point extracts are handled in TableGen.
4519     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4520                        DAG.getConstant(0, DL, XLenVT));
4521   }
4522 
4523   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4524   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4525 }
4526 
4527 // Some RVV intrinsics may claim that they want an integer operand to be
4528 // promoted or expanded.
4529 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4530                                            const RISCVSubtarget &Subtarget) {
4531   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4532           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4533          "Unexpected opcode");
4534 
4535   if (!Subtarget.hasVInstructions())
4536     return SDValue();
4537 
4538   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4539   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4540   SDLoc DL(Op);
4541 
4542   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4543       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4544   if (!II || !II->hasScalarOperand())
4545     return SDValue();
4546 
4547   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4548   assert(SplatOp < Op.getNumOperands());
4549 
4550   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4551   SDValue &ScalarOp = Operands[SplatOp];
4552   MVT OpVT = ScalarOp.getSimpleValueType();
4553   MVT XLenVT = Subtarget.getXLenVT();
4554 
4555   // If this isn't a scalar, or its type is XLenVT we're done.
4556   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4557     return SDValue();
4558 
4559   // Simplest case is that the operand needs to be promoted to XLenVT.
4560   if (OpVT.bitsLT(XLenVT)) {
4561     // If the operand is a constant, sign extend to increase our chances
4562     // of being able to use a .vi instruction. ANY_EXTEND would become a
4563     // a zero extend and the simm5 check in isel would fail.
4564     // FIXME: Should we ignore the upper bits in isel instead?
4565     unsigned ExtOpc =
4566         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4567     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4568     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4569   }
4570 
4571   // Use the previous operand to get the vXi64 VT. The result might be a mask
4572   // VT for compares. Using the previous operand assumes that the previous
4573   // operand will never have a smaller element size than a scalar operand and
4574   // that a widening operation never uses SEW=64.
4575   // NOTE: If this fails the below assert, we can probably just find the
4576   // element count from any operand or result and use it to construct the VT.
4577   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4578   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4579 
4580   // The more complex case is when the scalar is larger than XLenVT.
4581   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4582          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4583 
4584   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4585   // instruction to sign-extend since SEW>XLEN.
4586   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4587     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4588     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4589   }
4590 
4591   switch (IntNo) {
4592   case Intrinsic::riscv_vslide1up:
4593   case Intrinsic::riscv_vslide1down:
4594   case Intrinsic::riscv_vslide1up_mask:
4595   case Intrinsic::riscv_vslide1down_mask: {
4596     // We need to special case these when the scalar is larger than XLen.
4597     unsigned NumOps = Op.getNumOperands();
4598     bool IsMasked = NumOps == 7;
4599 
4600     // Convert the vector source to the equivalent nxvXi32 vector.
4601     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4602     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4603 
4604     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4605                                    DAG.getConstant(0, DL, XLenVT));
4606     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4607                                    DAG.getConstant(1, DL, XLenVT));
4608 
4609     // Double the VL since we halved SEW.
4610     SDValue AVL = getVLOperand(Op);
4611     SDValue I32VL;
4612 
4613     // Optimize for constant AVL
4614     if (isa<ConstantSDNode>(AVL)) {
4615       unsigned EltSize = VT.getScalarSizeInBits();
4616       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4617 
4618       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4619       unsigned MaxVLMAX =
4620           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4621 
4622       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4623       unsigned MinVLMAX =
4624           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4625 
4626       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4627       if (AVLInt <= MinVLMAX) {
4628         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4629       } else if (AVLInt >= 2 * MaxVLMAX) {
4630         // Just set vl to VLMAX in this situation
4631         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4632         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4633         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4634         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4635         SDValue SETVLMAX = DAG.getTargetConstant(
4636             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4637         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4638                             LMUL);
4639       } else {
4640         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4641         // is related to the hardware implementation.
4642         // So let the following code handle
4643       }
4644     }
4645     if (!I32VL) {
4646       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4647       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4648       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4649       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4650       SDValue SETVL =
4651           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4652       // Using vsetvli instruction to get actually used length which related to
4653       // the hardware implementation
4654       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4655                                SEW, LMUL);
4656       I32VL =
4657           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4658     }
4659 
4660     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4661 
4662     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4663     // instructions.
4664     SDValue Passthru;
4665     if (IsMasked)
4666       Passthru = DAG.getUNDEF(I32VT);
4667     else
4668       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4669 
4670     if (IntNo == Intrinsic::riscv_vslide1up ||
4671         IntNo == Intrinsic::riscv_vslide1up_mask) {
4672       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4673                         ScalarHi, I32Mask, I32VL);
4674       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4675                         ScalarLo, I32Mask, I32VL);
4676     } else {
4677       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4678                         ScalarLo, I32Mask, I32VL);
4679       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4680                         ScalarHi, I32Mask, I32VL);
4681     }
4682 
4683     // Convert back to nxvXi64.
4684     Vec = DAG.getBitcast(VT, Vec);
4685 
4686     if (!IsMasked)
4687       return Vec;
4688     // Apply mask after the operation.
4689     SDValue Mask = Operands[NumOps - 3];
4690     SDValue MaskedOff = Operands[1];
4691     // Assume Policy operand is the last operand.
4692     uint64_t Policy =
4693         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4694     // We don't need to select maskedoff if it's undef.
4695     if (MaskedOff.isUndef())
4696       return Vec;
4697     // TAMU
4698     if (Policy == RISCVII::TAIL_AGNOSTIC)
4699       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4700                          AVL);
4701     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4702     // It's fine because vmerge does not care mask policy.
4703     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4704                        AVL);
4705   }
4706   }
4707 
4708   // We need to convert the scalar to a splat vector.
4709   SDValue VL = getVLOperand(Op);
4710   assert(VL.getValueType() == XLenVT);
4711   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4712   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4713 }
4714 
4715 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4716                                                      SelectionDAG &DAG) const {
4717   unsigned IntNo = Op.getConstantOperandVal(0);
4718   SDLoc DL(Op);
4719   MVT XLenVT = Subtarget.getXLenVT();
4720 
4721   switch (IntNo) {
4722   default:
4723     break; // Don't custom lower most intrinsics.
4724   case Intrinsic::thread_pointer: {
4725     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4726     return DAG.getRegister(RISCV::X4, PtrVT);
4727   }
4728   case Intrinsic::riscv_orc_b:
4729   case Intrinsic::riscv_brev8: {
4730     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4731     unsigned Opc =
4732         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4733     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4734                        DAG.getConstant(7, DL, XLenVT));
4735   }
4736   case Intrinsic::riscv_grev:
4737   case Intrinsic::riscv_gorc: {
4738     unsigned Opc =
4739         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4740     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4741   }
4742   case Intrinsic::riscv_zip:
4743   case Intrinsic::riscv_unzip: {
4744     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4745     // For i32 the immediate is 15. For i64 the immediate is 31.
4746     unsigned Opc =
4747         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4748     unsigned BitWidth = Op.getValueSizeInBits();
4749     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4750     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4751                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4752   }
4753   case Intrinsic::riscv_shfl:
4754   case Intrinsic::riscv_unshfl: {
4755     unsigned Opc =
4756         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4757     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4758   }
4759   case Intrinsic::riscv_bcompress:
4760   case Intrinsic::riscv_bdecompress: {
4761     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4762                                                        : RISCVISD::BDECOMPRESS;
4763     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4764   }
4765   case Intrinsic::riscv_bfp:
4766     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4767                        Op.getOperand(2));
4768   case Intrinsic::riscv_fsl:
4769     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4770                        Op.getOperand(2), Op.getOperand(3));
4771   case Intrinsic::riscv_fsr:
4772     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4773                        Op.getOperand(2), Op.getOperand(3));
4774   case Intrinsic::riscv_vmv_x_s:
4775     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4776     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4777                        Op.getOperand(1));
4778   case Intrinsic::riscv_vmv_v_x:
4779     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4780                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4781                             Subtarget);
4782   case Intrinsic::riscv_vfmv_v_f:
4783     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4784                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4785   case Intrinsic::riscv_vmv_s_x: {
4786     SDValue Scalar = Op.getOperand(2);
4787 
4788     if (Scalar.getValueType().bitsLE(XLenVT)) {
4789       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4790       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4791                          Op.getOperand(1), Scalar, Op.getOperand(3));
4792     }
4793 
4794     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4795 
4796     // This is an i64 value that lives in two scalar registers. We have to
4797     // insert this in a convoluted way. First we build vXi64 splat containing
4798     // the two values that we assemble using some bit math. Next we'll use
4799     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4800     // to merge element 0 from our splat into the source vector.
4801     // FIXME: This is probably not the best way to do this, but it is
4802     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4803     // point.
4804     //   sw lo, (a0)
4805     //   sw hi, 4(a0)
4806     //   vlse vX, (a0)
4807     //
4808     //   vid.v      vVid
4809     //   vmseq.vx   mMask, vVid, 0
4810     //   vmerge.vvm vDest, vSrc, vVal, mMask
4811     MVT VT = Op.getSimpleValueType();
4812     SDValue Vec = Op.getOperand(1);
4813     SDValue VL = getVLOperand(Op);
4814 
4815     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4816     if (Op.getOperand(1).isUndef())
4817       return SplattedVal;
4818     SDValue SplattedIdx =
4819         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4820                     DAG.getConstant(0, DL, MVT::i32), VL);
4821 
4822     MVT MaskVT = getMaskTypeFor(VT);
4823     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4824     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4825     SDValue SelectCond =
4826         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4827                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4828     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4829                        Vec, VL);
4830   }
4831   }
4832 
4833   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4834 }
4835 
4836 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4837                                                     SelectionDAG &DAG) const {
4838   unsigned IntNo = Op.getConstantOperandVal(1);
4839   switch (IntNo) {
4840   default:
4841     break;
4842   case Intrinsic::riscv_masked_strided_load: {
4843     SDLoc DL(Op);
4844     MVT XLenVT = Subtarget.getXLenVT();
4845 
4846     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4847     // the selection of the masked intrinsics doesn't do this for us.
4848     SDValue Mask = Op.getOperand(5);
4849     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4850 
4851     MVT VT = Op->getSimpleValueType(0);
4852     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4853 
4854     SDValue PassThru = Op.getOperand(2);
4855     if (!IsUnmasked) {
4856       MVT MaskVT = getMaskTypeFor(ContainerVT);
4857       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4858       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4859     }
4860 
4861     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4862 
4863     SDValue IntID = DAG.getTargetConstant(
4864         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4865         XLenVT);
4866 
4867     auto *Load = cast<MemIntrinsicSDNode>(Op);
4868     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4869     if (IsUnmasked)
4870       Ops.push_back(DAG.getUNDEF(ContainerVT));
4871     else
4872       Ops.push_back(PassThru);
4873     Ops.push_back(Op.getOperand(3)); // Ptr
4874     Ops.push_back(Op.getOperand(4)); // Stride
4875     if (!IsUnmasked)
4876       Ops.push_back(Mask);
4877     Ops.push_back(VL);
4878     if (!IsUnmasked) {
4879       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4880       Ops.push_back(Policy);
4881     }
4882 
4883     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4884     SDValue Result =
4885         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4886                                 Load->getMemoryVT(), Load->getMemOperand());
4887     SDValue Chain = Result.getValue(1);
4888     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4889     return DAG.getMergeValues({Result, Chain}, DL);
4890   }
4891   case Intrinsic::riscv_seg2_load:
4892   case Intrinsic::riscv_seg3_load:
4893   case Intrinsic::riscv_seg4_load:
4894   case Intrinsic::riscv_seg5_load:
4895   case Intrinsic::riscv_seg6_load:
4896   case Intrinsic::riscv_seg7_load:
4897   case Intrinsic::riscv_seg8_load: {
4898     SDLoc DL(Op);
4899     static const Intrinsic::ID VlsegInts[7] = {
4900         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4901         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4902         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4903         Intrinsic::riscv_vlseg8};
4904     unsigned NF = Op->getNumValues() - 1;
4905     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4906     MVT XLenVT = Subtarget.getXLenVT();
4907     MVT VT = Op->getSimpleValueType(0);
4908     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4909 
4910     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4911     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4912     auto *Load = cast<MemIntrinsicSDNode>(Op);
4913     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4914     ContainerVTs.push_back(MVT::Other);
4915     SDVTList VTs = DAG.getVTList(ContainerVTs);
4916     SDValue Result =
4917         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4918                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4919                                 Load->getMemoryVT(), Load->getMemOperand());
4920     SmallVector<SDValue, 9> Results;
4921     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4922       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4923                                                   DAG, Subtarget));
4924     Results.push_back(Result.getValue(NF));
4925     return DAG.getMergeValues(Results, DL);
4926   }
4927   }
4928 
4929   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4930 }
4931 
4932 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4933                                                  SelectionDAG &DAG) const {
4934   unsigned IntNo = Op.getConstantOperandVal(1);
4935   switch (IntNo) {
4936   default:
4937     break;
4938   case Intrinsic::riscv_masked_strided_store: {
4939     SDLoc DL(Op);
4940     MVT XLenVT = Subtarget.getXLenVT();
4941 
4942     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4943     // the selection of the masked intrinsics doesn't do this for us.
4944     SDValue Mask = Op.getOperand(5);
4945     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4946 
4947     SDValue Val = Op.getOperand(2);
4948     MVT VT = Val.getSimpleValueType();
4949     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4950 
4951     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4952     if (!IsUnmasked) {
4953       MVT MaskVT = getMaskTypeFor(ContainerVT);
4954       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4955     }
4956 
4957     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4958 
4959     SDValue IntID = DAG.getTargetConstant(
4960         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4961         XLenVT);
4962 
4963     auto *Store = cast<MemIntrinsicSDNode>(Op);
4964     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4965     Ops.push_back(Val);
4966     Ops.push_back(Op.getOperand(3)); // Ptr
4967     Ops.push_back(Op.getOperand(4)); // Stride
4968     if (!IsUnmasked)
4969       Ops.push_back(Mask);
4970     Ops.push_back(VL);
4971 
4972     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4973                                    Ops, Store->getMemoryVT(),
4974                                    Store->getMemOperand());
4975   }
4976   }
4977 
4978   return SDValue();
4979 }
4980 
4981 static MVT getLMUL1VT(MVT VT) {
4982   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4983          "Unexpected vector MVT");
4984   return MVT::getScalableVectorVT(
4985       VT.getVectorElementType(),
4986       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4987 }
4988 
4989 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4990   switch (ISDOpcode) {
4991   default:
4992     llvm_unreachable("Unhandled reduction");
4993   case ISD::VECREDUCE_ADD:
4994     return RISCVISD::VECREDUCE_ADD_VL;
4995   case ISD::VECREDUCE_UMAX:
4996     return RISCVISD::VECREDUCE_UMAX_VL;
4997   case ISD::VECREDUCE_SMAX:
4998     return RISCVISD::VECREDUCE_SMAX_VL;
4999   case ISD::VECREDUCE_UMIN:
5000     return RISCVISD::VECREDUCE_UMIN_VL;
5001   case ISD::VECREDUCE_SMIN:
5002     return RISCVISD::VECREDUCE_SMIN_VL;
5003   case ISD::VECREDUCE_AND:
5004     return RISCVISD::VECREDUCE_AND_VL;
5005   case ISD::VECREDUCE_OR:
5006     return RISCVISD::VECREDUCE_OR_VL;
5007   case ISD::VECREDUCE_XOR:
5008     return RISCVISD::VECREDUCE_XOR_VL;
5009   }
5010 }
5011 
5012 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5013                                                          SelectionDAG &DAG,
5014                                                          bool IsVP) const {
5015   SDLoc DL(Op);
5016   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5017   MVT VecVT = Vec.getSimpleValueType();
5018   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5019           Op.getOpcode() == ISD::VECREDUCE_OR ||
5020           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5021           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5022           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5023           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5024          "Unexpected reduction lowering");
5025 
5026   MVT XLenVT = Subtarget.getXLenVT();
5027   assert(Op.getValueType() == XLenVT &&
5028          "Expected reduction output to be legalized to XLenVT");
5029 
5030   MVT ContainerVT = VecVT;
5031   if (VecVT.isFixedLengthVector()) {
5032     ContainerVT = getContainerForFixedLengthVector(VecVT);
5033     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5034   }
5035 
5036   SDValue Mask, VL;
5037   if (IsVP) {
5038     Mask = Op.getOperand(2);
5039     VL = Op.getOperand(3);
5040   } else {
5041     std::tie(Mask, VL) =
5042         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5043   }
5044 
5045   unsigned BaseOpc;
5046   ISD::CondCode CC;
5047   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5048 
5049   switch (Op.getOpcode()) {
5050   default:
5051     llvm_unreachable("Unhandled reduction");
5052   case ISD::VECREDUCE_AND:
5053   case ISD::VP_REDUCE_AND: {
5054     // vcpop ~x == 0
5055     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5056     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5057     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5058     CC = ISD::SETEQ;
5059     BaseOpc = ISD::AND;
5060     break;
5061   }
5062   case ISD::VECREDUCE_OR:
5063   case ISD::VP_REDUCE_OR:
5064     // vcpop x != 0
5065     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5066     CC = ISD::SETNE;
5067     BaseOpc = ISD::OR;
5068     break;
5069   case ISD::VECREDUCE_XOR:
5070   case ISD::VP_REDUCE_XOR: {
5071     // ((vcpop x) & 1) != 0
5072     SDValue One = DAG.getConstant(1, DL, XLenVT);
5073     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5074     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5075     CC = ISD::SETNE;
5076     BaseOpc = ISD::XOR;
5077     break;
5078   }
5079   }
5080 
5081   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5082 
5083   if (!IsVP)
5084     return SetCC;
5085 
5086   // Now include the start value in the operation.
5087   // Note that we must return the start value when no elements are operated
5088   // upon. The vcpop instructions we've emitted in each case above will return
5089   // 0 for an inactive vector, and so we've already received the neutral value:
5090   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5091   // can simply include the start value.
5092   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5093 }
5094 
5095 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5096                                             SelectionDAG &DAG) const {
5097   SDLoc DL(Op);
5098   SDValue Vec = Op.getOperand(0);
5099   EVT VecEVT = Vec.getValueType();
5100 
5101   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5102 
5103   // Due to ordering in legalize types we may have a vector type that needs to
5104   // be split. Do that manually so we can get down to a legal type.
5105   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5106          TargetLowering::TypeSplitVector) {
5107     SDValue Lo, Hi;
5108     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5109     VecEVT = Lo.getValueType();
5110     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5111   }
5112 
5113   // TODO: The type may need to be widened rather than split. Or widened before
5114   // it can be split.
5115   if (!isTypeLegal(VecEVT))
5116     return SDValue();
5117 
5118   MVT VecVT = VecEVT.getSimpleVT();
5119   MVT VecEltVT = VecVT.getVectorElementType();
5120   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5121 
5122   MVT ContainerVT = VecVT;
5123   if (VecVT.isFixedLengthVector()) {
5124     ContainerVT = getContainerForFixedLengthVector(VecVT);
5125     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5126   }
5127 
5128   MVT M1VT = getLMUL1VT(ContainerVT);
5129   MVT XLenVT = Subtarget.getXLenVT();
5130 
5131   SDValue Mask, VL;
5132   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5133 
5134   SDValue NeutralElem =
5135       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5136   SDValue IdentitySplat =
5137       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5138                        M1VT, DL, DAG, Subtarget);
5139   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5140                                   IdentitySplat, Mask, VL);
5141   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5142                              DAG.getConstant(0, DL, XLenVT));
5143   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5144 }
5145 
5146 // Given a reduction op, this function returns the matching reduction opcode,
5147 // the vector SDValue and the scalar SDValue required to lower this to a
5148 // RISCVISD node.
5149 static std::tuple<unsigned, SDValue, SDValue>
5150 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5151   SDLoc DL(Op);
5152   auto Flags = Op->getFlags();
5153   unsigned Opcode = Op.getOpcode();
5154   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5155   switch (Opcode) {
5156   default:
5157     llvm_unreachable("Unhandled reduction");
5158   case ISD::VECREDUCE_FADD: {
5159     // Use positive zero if we can. It is cheaper to materialize.
5160     SDValue Zero =
5161         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5162     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5163   }
5164   case ISD::VECREDUCE_SEQ_FADD:
5165     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5166                            Op.getOperand(0));
5167   case ISD::VECREDUCE_FMIN:
5168     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5169                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5170   case ISD::VECREDUCE_FMAX:
5171     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5172                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5173   }
5174 }
5175 
5176 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5177                                               SelectionDAG &DAG) const {
5178   SDLoc DL(Op);
5179   MVT VecEltVT = Op.getSimpleValueType();
5180 
5181   unsigned RVVOpcode;
5182   SDValue VectorVal, ScalarVal;
5183   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5184       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5185   MVT VecVT = VectorVal.getSimpleValueType();
5186 
5187   MVT ContainerVT = VecVT;
5188   if (VecVT.isFixedLengthVector()) {
5189     ContainerVT = getContainerForFixedLengthVector(VecVT);
5190     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5191   }
5192 
5193   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5194   MVT XLenVT = Subtarget.getXLenVT();
5195 
5196   SDValue Mask, VL;
5197   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5198 
5199   SDValue ScalarSplat =
5200       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5201                        M1VT, DL, DAG, Subtarget);
5202   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5203                                   VectorVal, ScalarSplat, Mask, VL);
5204   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5205                      DAG.getConstant(0, DL, XLenVT));
5206 }
5207 
5208 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5209   switch (ISDOpcode) {
5210   default:
5211     llvm_unreachable("Unhandled reduction");
5212   case ISD::VP_REDUCE_ADD:
5213     return RISCVISD::VECREDUCE_ADD_VL;
5214   case ISD::VP_REDUCE_UMAX:
5215     return RISCVISD::VECREDUCE_UMAX_VL;
5216   case ISD::VP_REDUCE_SMAX:
5217     return RISCVISD::VECREDUCE_SMAX_VL;
5218   case ISD::VP_REDUCE_UMIN:
5219     return RISCVISD::VECREDUCE_UMIN_VL;
5220   case ISD::VP_REDUCE_SMIN:
5221     return RISCVISD::VECREDUCE_SMIN_VL;
5222   case ISD::VP_REDUCE_AND:
5223     return RISCVISD::VECREDUCE_AND_VL;
5224   case ISD::VP_REDUCE_OR:
5225     return RISCVISD::VECREDUCE_OR_VL;
5226   case ISD::VP_REDUCE_XOR:
5227     return RISCVISD::VECREDUCE_XOR_VL;
5228   case ISD::VP_REDUCE_FADD:
5229     return RISCVISD::VECREDUCE_FADD_VL;
5230   case ISD::VP_REDUCE_SEQ_FADD:
5231     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5232   case ISD::VP_REDUCE_FMAX:
5233     return RISCVISD::VECREDUCE_FMAX_VL;
5234   case ISD::VP_REDUCE_FMIN:
5235     return RISCVISD::VECREDUCE_FMIN_VL;
5236   }
5237 }
5238 
5239 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5240                                            SelectionDAG &DAG) const {
5241   SDLoc DL(Op);
5242   SDValue Vec = Op.getOperand(1);
5243   EVT VecEVT = Vec.getValueType();
5244 
5245   // TODO: The type may need to be widened rather than split. Or widened before
5246   // it can be split.
5247   if (!isTypeLegal(VecEVT))
5248     return SDValue();
5249 
5250   MVT VecVT = VecEVT.getSimpleVT();
5251   MVT VecEltVT = VecVT.getVectorElementType();
5252   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5253 
5254   MVT ContainerVT = VecVT;
5255   if (VecVT.isFixedLengthVector()) {
5256     ContainerVT = getContainerForFixedLengthVector(VecVT);
5257     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5258   }
5259 
5260   SDValue VL = Op.getOperand(3);
5261   SDValue Mask = Op.getOperand(2);
5262 
5263   MVT M1VT = getLMUL1VT(ContainerVT);
5264   MVT XLenVT = Subtarget.getXLenVT();
5265   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5266 
5267   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5268                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5269                                         DL, DAG, Subtarget);
5270   SDValue Reduction =
5271       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5272   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5273                              DAG.getConstant(0, DL, XLenVT));
5274   if (!VecVT.isInteger())
5275     return Elt0;
5276   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5277 }
5278 
5279 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5280                                                    SelectionDAG &DAG) const {
5281   SDValue Vec = Op.getOperand(0);
5282   SDValue SubVec = Op.getOperand(1);
5283   MVT VecVT = Vec.getSimpleValueType();
5284   MVT SubVecVT = SubVec.getSimpleValueType();
5285 
5286   SDLoc DL(Op);
5287   MVT XLenVT = Subtarget.getXLenVT();
5288   unsigned OrigIdx = Op.getConstantOperandVal(2);
5289   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5290 
5291   // We don't have the ability to slide mask vectors up indexed by their i1
5292   // elements; the smallest we can do is i8. Often we are able to bitcast to
5293   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5294   // into a scalable one, we might not necessarily have enough scalable
5295   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5296   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5297       (OrigIdx != 0 || !Vec.isUndef())) {
5298     if (VecVT.getVectorMinNumElements() >= 8 &&
5299         SubVecVT.getVectorMinNumElements() >= 8) {
5300       assert(OrigIdx % 8 == 0 && "Invalid index");
5301       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5302              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5303              "Unexpected mask vector lowering");
5304       OrigIdx /= 8;
5305       SubVecVT =
5306           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5307                            SubVecVT.isScalableVector());
5308       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5309                                VecVT.isScalableVector());
5310       Vec = DAG.getBitcast(VecVT, Vec);
5311       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5312     } else {
5313       // We can't slide this mask vector up indexed by its i1 elements.
5314       // This poses a problem when we wish to insert a scalable vector which
5315       // can't be re-expressed as a larger type. Just choose the slow path and
5316       // extend to a larger type, then truncate back down.
5317       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5318       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5319       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5320       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5321       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5322                         Op.getOperand(2));
5323       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5324       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5325     }
5326   }
5327 
5328   // If the subvector vector is a fixed-length type, we cannot use subregister
5329   // manipulation to simplify the codegen; we don't know which register of a
5330   // LMUL group contains the specific subvector as we only know the minimum
5331   // register size. Therefore we must slide the vector group up the full
5332   // amount.
5333   if (SubVecVT.isFixedLengthVector()) {
5334     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5335       return Op;
5336     MVT ContainerVT = VecVT;
5337     if (VecVT.isFixedLengthVector()) {
5338       ContainerVT = getContainerForFixedLengthVector(VecVT);
5339       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5340     }
5341     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5342                          DAG.getUNDEF(ContainerVT), SubVec,
5343                          DAG.getConstant(0, DL, XLenVT));
5344     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5345       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5346       return DAG.getBitcast(Op.getValueType(), SubVec);
5347     }
5348     SDValue Mask =
5349         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5350     // Set the vector length to only the number of elements we care about. Note
5351     // that for slideup this includes the offset.
5352     SDValue VL =
5353         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5354     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5355     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5356                                   SubVec, SlideupAmt, Mask, VL);
5357     if (VecVT.isFixedLengthVector())
5358       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5359     return DAG.getBitcast(Op.getValueType(), Slideup);
5360   }
5361 
5362   unsigned SubRegIdx, RemIdx;
5363   std::tie(SubRegIdx, RemIdx) =
5364       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5365           VecVT, SubVecVT, OrigIdx, TRI);
5366 
5367   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5368   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5369                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5370                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5371 
5372   // 1. If the Idx has been completely eliminated and this subvector's size is
5373   // a vector register or a multiple thereof, or the surrounding elements are
5374   // undef, then this is a subvector insert which naturally aligns to a vector
5375   // register. These can easily be handled using subregister manipulation.
5376   // 2. If the subvector is smaller than a vector register, then the insertion
5377   // must preserve the undisturbed elements of the register. We do this by
5378   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5379   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5380   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5381   // LMUL=1 type back into the larger vector (resolving to another subregister
5382   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5383   // to avoid allocating a large register group to hold our subvector.
5384   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5385     return Op;
5386 
5387   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5388   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5389   // (in our case undisturbed). This means we can set up a subvector insertion
5390   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5391   // size of the subvector.
5392   MVT InterSubVT = VecVT;
5393   SDValue AlignedExtract = Vec;
5394   unsigned AlignedIdx = OrigIdx - RemIdx;
5395   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5396     InterSubVT = getLMUL1VT(VecVT);
5397     // Extract a subvector equal to the nearest full vector register type. This
5398     // should resolve to a EXTRACT_SUBREG instruction.
5399     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5400                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5401   }
5402 
5403   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5404   // For scalable vectors this must be further multiplied by vscale.
5405   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5406 
5407   SDValue Mask, VL;
5408   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5409 
5410   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5411   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5412   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5413   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5414 
5415   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5416                        DAG.getUNDEF(InterSubVT), SubVec,
5417                        DAG.getConstant(0, DL, XLenVT));
5418 
5419   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5420                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5421 
5422   // If required, insert this subvector back into the correct vector register.
5423   // This should resolve to an INSERT_SUBREG instruction.
5424   if (VecVT.bitsGT(InterSubVT))
5425     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5426                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5427 
5428   // We might have bitcast from a mask type: cast back to the original type if
5429   // required.
5430   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5431 }
5432 
5433 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5434                                                     SelectionDAG &DAG) const {
5435   SDValue Vec = Op.getOperand(0);
5436   MVT SubVecVT = Op.getSimpleValueType();
5437   MVT VecVT = Vec.getSimpleValueType();
5438 
5439   SDLoc DL(Op);
5440   MVT XLenVT = Subtarget.getXLenVT();
5441   unsigned OrigIdx = Op.getConstantOperandVal(1);
5442   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5443 
5444   // We don't have the ability to slide mask vectors down indexed by their i1
5445   // elements; the smallest we can do is i8. Often we are able to bitcast to
5446   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5447   // from a scalable one, we might not necessarily have enough scalable
5448   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5449   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5450     if (VecVT.getVectorMinNumElements() >= 8 &&
5451         SubVecVT.getVectorMinNumElements() >= 8) {
5452       assert(OrigIdx % 8 == 0 && "Invalid index");
5453       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5454              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5455              "Unexpected mask vector lowering");
5456       OrigIdx /= 8;
5457       SubVecVT =
5458           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5459                            SubVecVT.isScalableVector());
5460       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5461                                VecVT.isScalableVector());
5462       Vec = DAG.getBitcast(VecVT, Vec);
5463     } else {
5464       // We can't slide this mask vector down, indexed by its i1 elements.
5465       // This poses a problem when we wish to extract a scalable vector which
5466       // can't be re-expressed as a larger type. Just choose the slow path and
5467       // extend to a larger type, then truncate back down.
5468       // TODO: We could probably improve this when extracting certain fixed
5469       // from fixed, where we can extract as i8 and shift the correct element
5470       // right to reach the desired subvector?
5471       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5472       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5473       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5474       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5475                         Op.getOperand(1));
5476       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5477       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5478     }
5479   }
5480 
5481   // If the subvector vector is a fixed-length type, we cannot use subregister
5482   // manipulation to simplify the codegen; we don't know which register of a
5483   // LMUL group contains the specific subvector as we only know the minimum
5484   // register size. Therefore we must slide the vector group down the full
5485   // amount.
5486   if (SubVecVT.isFixedLengthVector()) {
5487     // With an index of 0 this is a cast-like subvector, which can be performed
5488     // with subregister operations.
5489     if (OrigIdx == 0)
5490       return Op;
5491     MVT ContainerVT = VecVT;
5492     if (VecVT.isFixedLengthVector()) {
5493       ContainerVT = getContainerForFixedLengthVector(VecVT);
5494       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5495     }
5496     SDValue Mask =
5497         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5498     // Set the vector length to only the number of elements we care about. This
5499     // avoids sliding down elements we're going to discard straight away.
5500     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5501     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5502     SDValue Slidedown =
5503         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5504                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5505     // Now we can use a cast-like subvector extract to get the result.
5506     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5507                             DAG.getConstant(0, DL, XLenVT));
5508     return DAG.getBitcast(Op.getValueType(), Slidedown);
5509   }
5510 
5511   unsigned SubRegIdx, RemIdx;
5512   std::tie(SubRegIdx, RemIdx) =
5513       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5514           VecVT, SubVecVT, OrigIdx, TRI);
5515 
5516   // If the Idx has been completely eliminated then this is a subvector extract
5517   // which naturally aligns to a vector register. These can easily be handled
5518   // using subregister manipulation.
5519   if (RemIdx == 0)
5520     return Op;
5521 
5522   // Else we must shift our vector register directly to extract the subvector.
5523   // Do this using VSLIDEDOWN.
5524 
5525   // If the vector type is an LMUL-group type, extract a subvector equal to the
5526   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5527   // instruction.
5528   MVT InterSubVT = VecVT;
5529   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5530     InterSubVT = getLMUL1VT(VecVT);
5531     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5532                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5533   }
5534 
5535   // Slide this vector register down by the desired number of elements in order
5536   // to place the desired subvector starting at element 0.
5537   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5538   // For scalable vectors this must be further multiplied by vscale.
5539   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5540 
5541   SDValue Mask, VL;
5542   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5543   SDValue Slidedown =
5544       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5545                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5546 
5547   // Now the vector is in the right position, extract our final subvector. This
5548   // should resolve to a COPY.
5549   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5550                           DAG.getConstant(0, DL, XLenVT));
5551 
5552   // We might have bitcast from a mask type: cast back to the original type if
5553   // required.
5554   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5555 }
5556 
5557 // Lower step_vector to the vid instruction. Any non-identity step value must
5558 // be accounted for my manual expansion.
5559 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5560                                               SelectionDAG &DAG) const {
5561   SDLoc DL(Op);
5562   MVT VT = Op.getSimpleValueType();
5563   MVT XLenVT = Subtarget.getXLenVT();
5564   SDValue Mask, VL;
5565   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5566   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5567   uint64_t StepValImm = Op.getConstantOperandVal(0);
5568   if (StepValImm != 1) {
5569     if (isPowerOf2_64(StepValImm)) {
5570       SDValue StepVal =
5571           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5572                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5573       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5574     } else {
5575       SDValue StepVal = lowerScalarSplat(
5576           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5577           VL, VT, DL, DAG, Subtarget);
5578       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5579     }
5580   }
5581   return StepVec;
5582 }
5583 
5584 // Implement vector_reverse using vrgather.vv with indices determined by
5585 // subtracting the id of each element from (VLMAX-1). This will convert
5586 // the indices like so:
5587 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5588 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5589 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5590                                                  SelectionDAG &DAG) const {
5591   SDLoc DL(Op);
5592   MVT VecVT = Op.getSimpleValueType();
5593   unsigned EltSize = VecVT.getScalarSizeInBits();
5594   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5595 
5596   unsigned MaxVLMAX = 0;
5597   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5598   if (VectorBitsMax != 0)
5599     MaxVLMAX =
5600         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5601 
5602   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5603   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5604 
5605   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5606   // to use vrgatherei16.vv.
5607   // TODO: It's also possible to use vrgatherei16.vv for other types to
5608   // decrease register width for the index calculation.
5609   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5610     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5611     // Reverse each half, then reassemble them in reverse order.
5612     // NOTE: It's also possible that after splitting that VLMAX no longer
5613     // requires vrgatherei16.vv.
5614     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5615       SDValue Lo, Hi;
5616       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5617       EVT LoVT, HiVT;
5618       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5619       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5620       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5621       // Reassemble the low and high pieces reversed.
5622       // FIXME: This is a CONCAT_VECTORS.
5623       SDValue Res =
5624           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5625                       DAG.getIntPtrConstant(0, DL));
5626       return DAG.getNode(
5627           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5628           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5629     }
5630 
5631     // Just promote the int type to i16 which will double the LMUL.
5632     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5633     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5634   }
5635 
5636   MVT XLenVT = Subtarget.getXLenVT();
5637   SDValue Mask, VL;
5638   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5639 
5640   // Calculate VLMAX-1 for the desired SEW.
5641   unsigned MinElts = VecVT.getVectorMinNumElements();
5642   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5643                               DAG.getConstant(MinElts, DL, XLenVT));
5644   SDValue VLMinus1 =
5645       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5646 
5647   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5648   bool IsRV32E64 =
5649       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5650   SDValue SplatVL;
5651   if (!IsRV32E64)
5652     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5653   else
5654     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5655                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5656 
5657   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5658   SDValue Indices =
5659       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5660 
5661   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5662 }
5663 
5664 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5665                                                 SelectionDAG &DAG) const {
5666   SDLoc DL(Op);
5667   SDValue V1 = Op.getOperand(0);
5668   SDValue V2 = Op.getOperand(1);
5669   MVT XLenVT = Subtarget.getXLenVT();
5670   MVT VecVT = Op.getSimpleValueType();
5671 
5672   unsigned MinElts = VecVT.getVectorMinNumElements();
5673   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5674                               DAG.getConstant(MinElts, DL, XLenVT));
5675 
5676   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5677   SDValue DownOffset, UpOffset;
5678   if (ImmValue >= 0) {
5679     // The operand is a TargetConstant, we need to rebuild it as a regular
5680     // constant.
5681     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5682     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5683   } else {
5684     // The operand is a TargetConstant, we need to rebuild it as a regular
5685     // constant rather than negating the original operand.
5686     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5687     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5688   }
5689 
5690   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5691 
5692   SDValue SlideDown =
5693       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5694                   DownOffset, TrueMask, UpOffset);
5695   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5696                      TrueMask,
5697                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5698 }
5699 
5700 SDValue
5701 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5702                                                      SelectionDAG &DAG) const {
5703   SDLoc DL(Op);
5704   auto *Load = cast<LoadSDNode>(Op);
5705 
5706   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5707                                         Load->getMemoryVT(),
5708                                         *Load->getMemOperand()) &&
5709          "Expecting a correctly-aligned load");
5710 
5711   MVT VT = Op.getSimpleValueType();
5712   MVT XLenVT = Subtarget.getXLenVT();
5713   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5714 
5715   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5716 
5717   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5718   SDValue IntID = DAG.getTargetConstant(
5719       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5720   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5721   if (!IsMaskOp)
5722     Ops.push_back(DAG.getUNDEF(ContainerVT));
5723   Ops.push_back(Load->getBasePtr());
5724   Ops.push_back(VL);
5725   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5726   SDValue NewLoad =
5727       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5728                               Load->getMemoryVT(), Load->getMemOperand());
5729 
5730   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5731   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5732 }
5733 
5734 SDValue
5735 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5736                                                       SelectionDAG &DAG) const {
5737   SDLoc DL(Op);
5738   auto *Store = cast<StoreSDNode>(Op);
5739 
5740   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5741                                         Store->getMemoryVT(),
5742                                         *Store->getMemOperand()) &&
5743          "Expecting a correctly-aligned store");
5744 
5745   SDValue StoreVal = Store->getValue();
5746   MVT VT = StoreVal.getSimpleValueType();
5747   MVT XLenVT = Subtarget.getXLenVT();
5748 
5749   // If the size less than a byte, we need to pad with zeros to make a byte.
5750   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5751     VT = MVT::v8i1;
5752     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5753                            DAG.getConstant(0, DL, VT), StoreVal,
5754                            DAG.getIntPtrConstant(0, DL));
5755   }
5756 
5757   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5758 
5759   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5760 
5761   SDValue NewValue =
5762       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5763 
5764   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5765   SDValue IntID = DAG.getTargetConstant(
5766       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5767   return DAG.getMemIntrinsicNode(
5768       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5769       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5770       Store->getMemoryVT(), Store->getMemOperand());
5771 }
5772 
5773 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5774                                              SelectionDAG &DAG) const {
5775   SDLoc DL(Op);
5776   MVT VT = Op.getSimpleValueType();
5777 
5778   const auto *MemSD = cast<MemSDNode>(Op);
5779   EVT MemVT = MemSD->getMemoryVT();
5780   MachineMemOperand *MMO = MemSD->getMemOperand();
5781   SDValue Chain = MemSD->getChain();
5782   SDValue BasePtr = MemSD->getBasePtr();
5783 
5784   SDValue Mask, PassThru, VL;
5785   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5786     Mask = VPLoad->getMask();
5787     PassThru = DAG.getUNDEF(VT);
5788     VL = VPLoad->getVectorLength();
5789   } else {
5790     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5791     Mask = MLoad->getMask();
5792     PassThru = MLoad->getPassThru();
5793   }
5794 
5795   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5796 
5797   MVT XLenVT = Subtarget.getXLenVT();
5798 
5799   MVT ContainerVT = VT;
5800   if (VT.isFixedLengthVector()) {
5801     ContainerVT = getContainerForFixedLengthVector(VT);
5802     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5803     if (!IsUnmasked) {
5804       MVT MaskVT = getMaskTypeFor(ContainerVT);
5805       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5806     }
5807   }
5808 
5809   if (!VL)
5810     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5811 
5812   unsigned IntID =
5813       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5814   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5815   if (IsUnmasked)
5816     Ops.push_back(DAG.getUNDEF(ContainerVT));
5817   else
5818     Ops.push_back(PassThru);
5819   Ops.push_back(BasePtr);
5820   if (!IsUnmasked)
5821     Ops.push_back(Mask);
5822   Ops.push_back(VL);
5823   if (!IsUnmasked)
5824     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5825 
5826   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5827 
5828   SDValue Result =
5829       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5830   Chain = Result.getValue(1);
5831 
5832   if (VT.isFixedLengthVector())
5833     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5834 
5835   return DAG.getMergeValues({Result, Chain}, DL);
5836 }
5837 
5838 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5839                                               SelectionDAG &DAG) const {
5840   SDLoc DL(Op);
5841 
5842   const auto *MemSD = cast<MemSDNode>(Op);
5843   EVT MemVT = MemSD->getMemoryVT();
5844   MachineMemOperand *MMO = MemSD->getMemOperand();
5845   SDValue Chain = MemSD->getChain();
5846   SDValue BasePtr = MemSD->getBasePtr();
5847   SDValue Val, Mask, VL;
5848 
5849   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5850     Val = VPStore->getValue();
5851     Mask = VPStore->getMask();
5852     VL = VPStore->getVectorLength();
5853   } else {
5854     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5855     Val = MStore->getValue();
5856     Mask = MStore->getMask();
5857   }
5858 
5859   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5860 
5861   MVT VT = Val.getSimpleValueType();
5862   MVT XLenVT = Subtarget.getXLenVT();
5863 
5864   MVT ContainerVT = VT;
5865   if (VT.isFixedLengthVector()) {
5866     ContainerVT = getContainerForFixedLengthVector(VT);
5867 
5868     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5869     if (!IsUnmasked) {
5870       MVT MaskVT = getMaskTypeFor(ContainerVT);
5871       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5872     }
5873   }
5874 
5875   if (!VL)
5876     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5877 
5878   unsigned IntID =
5879       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5880   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5881   Ops.push_back(Val);
5882   Ops.push_back(BasePtr);
5883   if (!IsUnmasked)
5884     Ops.push_back(Mask);
5885   Ops.push_back(VL);
5886 
5887   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5888                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5889 }
5890 
5891 SDValue
5892 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5893                                                       SelectionDAG &DAG) const {
5894   MVT InVT = Op.getOperand(0).getSimpleValueType();
5895   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5896 
5897   MVT VT = Op.getSimpleValueType();
5898 
5899   SDValue Op1 =
5900       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5901   SDValue Op2 =
5902       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5903 
5904   SDLoc DL(Op);
5905   SDValue VL =
5906       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5907 
5908   MVT MaskVT = getMaskTypeFor(ContainerVT);
5909   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5910 
5911   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5912                             Op.getOperand(2), Mask, VL);
5913 
5914   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5915 }
5916 
5917 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5918     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5919   MVT VT = Op.getSimpleValueType();
5920 
5921   if (VT.getVectorElementType() == MVT::i1)
5922     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5923 
5924   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5925 }
5926 
5927 SDValue
5928 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5929                                                       SelectionDAG &DAG) const {
5930   unsigned Opc;
5931   switch (Op.getOpcode()) {
5932   default: llvm_unreachable("Unexpected opcode!");
5933   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5934   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5935   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5936   }
5937 
5938   return lowerToScalableOp(Op, DAG, Opc);
5939 }
5940 
5941 // Lower vector ABS to smax(X, sub(0, X)).
5942 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5943   SDLoc DL(Op);
5944   MVT VT = Op.getSimpleValueType();
5945   SDValue X = Op.getOperand(0);
5946 
5947   assert(VT.isFixedLengthVector() && "Unexpected type");
5948 
5949   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5950   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5951 
5952   SDValue Mask, VL;
5953   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5954 
5955   SDValue SplatZero = DAG.getNode(
5956       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5957       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5958   SDValue NegX =
5959       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5960   SDValue Max =
5961       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5962 
5963   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5964 }
5965 
5966 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5967     SDValue Op, SelectionDAG &DAG) const {
5968   SDLoc DL(Op);
5969   MVT VT = Op.getSimpleValueType();
5970   SDValue Mag = Op.getOperand(0);
5971   SDValue Sign = Op.getOperand(1);
5972   assert(Mag.getValueType() == Sign.getValueType() &&
5973          "Can only handle COPYSIGN with matching types.");
5974 
5975   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5976   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5977   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5978 
5979   SDValue Mask, VL;
5980   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5981 
5982   SDValue CopySign =
5983       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5984 
5985   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5986 }
5987 
5988 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5989     SDValue Op, SelectionDAG &DAG) const {
5990   MVT VT = Op.getSimpleValueType();
5991   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5992 
5993   MVT I1ContainerVT =
5994       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5995 
5996   SDValue CC =
5997       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5998   SDValue Op1 =
5999       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6000   SDValue Op2 =
6001       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6002 
6003   SDLoc DL(Op);
6004   SDValue Mask, VL;
6005   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6006 
6007   SDValue Select =
6008       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6009 
6010   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6011 }
6012 
6013 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6014                                                unsigned NewOpc,
6015                                                bool HasMask) const {
6016   MVT VT = Op.getSimpleValueType();
6017   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6018 
6019   // Create list of operands by converting existing ones to scalable types.
6020   SmallVector<SDValue, 6> Ops;
6021   for (const SDValue &V : Op->op_values()) {
6022     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6023 
6024     // Pass through non-vector operands.
6025     if (!V.getValueType().isVector()) {
6026       Ops.push_back(V);
6027       continue;
6028     }
6029 
6030     // "cast" fixed length vector to a scalable vector.
6031     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6032            "Only fixed length vectors are supported!");
6033     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6034   }
6035 
6036   SDLoc DL(Op);
6037   SDValue Mask, VL;
6038   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6039   if (HasMask)
6040     Ops.push_back(Mask);
6041   Ops.push_back(VL);
6042 
6043   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6044   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6045 }
6046 
6047 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6048 // * Operands of each node are assumed to be in the same order.
6049 // * The EVL operand is promoted from i32 to i64 on RV64.
6050 // * Fixed-length vectors are converted to their scalable-vector container
6051 //   types.
6052 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6053                                        unsigned RISCVISDOpc) const {
6054   SDLoc DL(Op);
6055   MVT VT = Op.getSimpleValueType();
6056   SmallVector<SDValue, 4> Ops;
6057 
6058   for (const auto &OpIdx : enumerate(Op->ops())) {
6059     SDValue V = OpIdx.value();
6060     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6061     // Pass through operands which aren't fixed-length vectors.
6062     if (!V.getValueType().isFixedLengthVector()) {
6063       Ops.push_back(V);
6064       continue;
6065     }
6066     // "cast" fixed length vector to a scalable vector.
6067     MVT OpVT = V.getSimpleValueType();
6068     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6069     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6070            "Only fixed length vectors are supported!");
6071     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6072   }
6073 
6074   if (!VT.isFixedLengthVector())
6075     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6076 
6077   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6078 
6079   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6080 
6081   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6082 }
6083 
6084 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6085                                               SelectionDAG &DAG) const {
6086   SDLoc DL(Op);
6087   MVT VT = Op.getSimpleValueType();
6088 
6089   SDValue Src = Op.getOperand(0);
6090   // NOTE: Mask is dropped.
6091   SDValue VL = Op.getOperand(2);
6092 
6093   MVT ContainerVT = VT;
6094   if (VT.isFixedLengthVector()) {
6095     ContainerVT = getContainerForFixedLengthVector(VT);
6096     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6097     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6098   }
6099 
6100   MVT XLenVT = Subtarget.getXLenVT();
6101   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6102   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6103                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6104 
6105   SDValue SplatValue =
6106       DAG.getConstant(Op.getOpcode() == ISD::VP_ZEXT ? 1 : -1, DL, XLenVT);
6107   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6108                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6109 
6110   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6111                                Splat, ZeroSplat, VL);
6112   if (!VT.isFixedLengthVector())
6113     return Result;
6114   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6115 }
6116 
6117 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6118                                                 SelectionDAG &DAG) const {
6119   SDLoc DL(Op);
6120   MVT VT = Op.getSimpleValueType();
6121 
6122   SDValue Op1 = Op.getOperand(0);
6123   SDValue Op2 = Op.getOperand(1);
6124   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6125   // NOTE: Mask is dropped.
6126   SDValue VL = Op.getOperand(4);
6127 
6128   MVT ContainerVT = VT;
6129   if (VT.isFixedLengthVector()) {
6130     ContainerVT = getContainerForFixedLengthVector(VT);
6131     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6132     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6133   }
6134 
6135   SDValue Result;
6136   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
6137 
6138   switch (Condition) {
6139   default:
6140     break;
6141   // X != Y  --> (X^Y)
6142   case ISD::SETNE:
6143     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6144     break;
6145   // X == Y  --> ~(X^Y)
6146   case ISD::SETEQ: {
6147     SDValue Temp =
6148         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6149     Result =
6150         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6151     break;
6152   }
6153   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6154   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6155   case ISD::SETGT:
6156   case ISD::SETULT: {
6157     SDValue Temp =
6158         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6159     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6160     break;
6161   }
6162   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6163   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6164   case ISD::SETLT:
6165   case ISD::SETUGT: {
6166     SDValue Temp =
6167         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6168     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6169     break;
6170   }
6171   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6172   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6173   case ISD::SETGE:
6174   case ISD::SETULE: {
6175     SDValue Temp =
6176         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6177     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6178     break;
6179   }
6180   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6181   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6182   case ISD::SETLE:
6183   case ISD::SETUGE: {
6184     SDValue Temp =
6185         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6186     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6187     break;
6188   }
6189   }
6190 
6191   if (!VT.isFixedLengthVector())
6192     return Result;
6193   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6194 }
6195 
6196 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6197 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6198                                                 unsigned RISCVISDOpc) const {
6199   SDLoc DL(Op);
6200 
6201   SDValue Src = Op.getOperand(0);
6202   SDValue Mask = Op.getOperand(1);
6203   SDValue VL = Op.getOperand(2);
6204 
6205   MVT DstVT = Op.getSimpleValueType();
6206   MVT SrcVT = Src.getSimpleValueType();
6207   if (DstVT.isFixedLengthVector()) {
6208     DstVT = getContainerForFixedLengthVector(DstVT);
6209     SrcVT = getContainerForFixedLengthVector(SrcVT);
6210     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6211     MVT MaskVT = getMaskTypeFor(DstVT);
6212     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6213   }
6214 
6215   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6216                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6217                                 ? RISCVISD::VSEXT_VL
6218                                 : RISCVISD::VZEXT_VL;
6219 
6220   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6221   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6222 
6223   SDValue Result;
6224   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6225     if (SrcVT.isInteger()) {
6226       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6227 
6228       // Do we need to do any pre-widening before converting?
6229       if (SrcEltSize == 1) {
6230         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6231         MVT XLenVT = Subtarget.getXLenVT();
6232         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6233         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6234                                         DAG.getUNDEF(IntVT), Zero, VL);
6235         SDValue One = DAG.getConstant(
6236             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6237         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6238                                        DAG.getUNDEF(IntVT), One, VL);
6239         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6240                           ZeroSplat, VL);
6241       } else if (DstEltSize > (2 * SrcEltSize)) {
6242         // Widen before converting.
6243         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6244                                      DstVT.getVectorElementCount());
6245         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6246       }
6247 
6248       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6249     } else {
6250       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6251              "Wrong input/output vector types");
6252 
6253       // Convert f16 to f32 then convert f32 to i64.
6254       if (DstEltSize > (2 * SrcEltSize)) {
6255         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6256         MVT InterimFVT =
6257             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6258         Src =
6259             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6260       }
6261 
6262       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6263     }
6264   } else { // Narrowing + Conversion
6265     if (SrcVT.isInteger()) {
6266       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6267       // First do a narrowing convert to an FP type half the size, then round
6268       // the FP type to a small FP type if needed.
6269 
6270       MVT InterimFVT = DstVT;
6271       if (SrcEltSize > (2 * DstEltSize)) {
6272         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6273         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6274         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6275       }
6276 
6277       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6278 
6279       if (InterimFVT != DstVT) {
6280         Src = Result;
6281         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6282       }
6283     } else {
6284       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6285              "Wrong input/output vector types");
6286       // First do a narrowing conversion to an integer half the size, then
6287       // truncate if needed.
6288 
6289       if (DstEltSize == 1) {
6290         // First convert to the same size integer, then convert to mask using
6291         // setcc.
6292         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6293         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6294                                           DstVT.getVectorElementCount());
6295         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6296 
6297         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6298         // otherwise the conversion was undefined.
6299         MVT XLenVT = Subtarget.getXLenVT();
6300         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
6301         SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
6302                                 DAG.getUNDEF(InterimIVT), SplatZero);
6303         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6304                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6305       } else {
6306         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6307                                           DstVT.getVectorElementCount());
6308 
6309         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6310 
6311         while (InterimIVT != DstVT) {
6312           SrcEltSize /= 2;
6313           Src = Result;
6314           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6315                                         DstVT.getVectorElementCount());
6316           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6317                                Src, Mask, VL);
6318         }
6319       }
6320     }
6321   }
6322 
6323   MVT VT = Op.getSimpleValueType();
6324   if (!VT.isFixedLengthVector())
6325     return Result;
6326   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6327 }
6328 
6329 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6330                                             unsigned MaskOpc,
6331                                             unsigned VecOpc) const {
6332   MVT VT = Op.getSimpleValueType();
6333   if (VT.getVectorElementType() != MVT::i1)
6334     return lowerVPOp(Op, DAG, VecOpc);
6335 
6336   // It is safe to drop mask parameter as masked-off elements are undef.
6337   SDValue Op1 = Op->getOperand(0);
6338   SDValue Op2 = Op->getOperand(1);
6339   SDValue VL = Op->getOperand(3);
6340 
6341   MVT ContainerVT = VT;
6342   const bool IsFixed = VT.isFixedLengthVector();
6343   if (IsFixed) {
6344     ContainerVT = getContainerForFixedLengthVector(VT);
6345     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6346     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6347   }
6348 
6349   SDLoc DL(Op);
6350   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6351   if (!IsFixed)
6352     return Val;
6353   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6354 }
6355 
6356 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6357 // matched to a RVV indexed load. The RVV indexed load instructions only
6358 // support the "unsigned unscaled" addressing mode; indices are implicitly
6359 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6360 // signed or scaled indexing is extended to the XLEN value type and scaled
6361 // accordingly.
6362 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6363                                                SelectionDAG &DAG) const {
6364   SDLoc DL(Op);
6365   MVT VT = Op.getSimpleValueType();
6366 
6367   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6368   EVT MemVT = MemSD->getMemoryVT();
6369   MachineMemOperand *MMO = MemSD->getMemOperand();
6370   SDValue Chain = MemSD->getChain();
6371   SDValue BasePtr = MemSD->getBasePtr();
6372 
6373   ISD::LoadExtType LoadExtType;
6374   SDValue Index, Mask, PassThru, VL;
6375 
6376   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6377     Index = VPGN->getIndex();
6378     Mask = VPGN->getMask();
6379     PassThru = DAG.getUNDEF(VT);
6380     VL = VPGN->getVectorLength();
6381     // VP doesn't support extending loads.
6382     LoadExtType = ISD::NON_EXTLOAD;
6383   } else {
6384     // Else it must be a MGATHER.
6385     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6386     Index = MGN->getIndex();
6387     Mask = MGN->getMask();
6388     PassThru = MGN->getPassThru();
6389     LoadExtType = MGN->getExtensionType();
6390   }
6391 
6392   MVT IndexVT = Index.getSimpleValueType();
6393   MVT XLenVT = Subtarget.getXLenVT();
6394 
6395   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6396          "Unexpected VTs!");
6397   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6398   // Targets have to explicitly opt-in for extending vector loads.
6399   assert(LoadExtType == ISD::NON_EXTLOAD &&
6400          "Unexpected extending MGATHER/VP_GATHER");
6401   (void)LoadExtType;
6402 
6403   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6404   // the selection of the masked intrinsics doesn't do this for us.
6405   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6406 
6407   MVT ContainerVT = VT;
6408   if (VT.isFixedLengthVector()) {
6409     // We need to use the larger of the result and index type to determine the
6410     // scalable type to use so we don't increase LMUL for any operand/result.
6411     if (VT.bitsGE(IndexVT)) {
6412       ContainerVT = getContainerForFixedLengthVector(VT);
6413       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6414                                  ContainerVT.getVectorElementCount());
6415     } else {
6416       IndexVT = getContainerForFixedLengthVector(IndexVT);
6417       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6418                                      IndexVT.getVectorElementCount());
6419     }
6420 
6421     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6422 
6423     if (!IsUnmasked) {
6424       MVT MaskVT = getMaskTypeFor(ContainerVT);
6425       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6426       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6427     }
6428   }
6429 
6430   if (!VL)
6431     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6432 
6433   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6434     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6435     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6436                                    VL);
6437     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6438                         TrueMask, VL);
6439   }
6440 
6441   unsigned IntID =
6442       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6443   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6444   if (IsUnmasked)
6445     Ops.push_back(DAG.getUNDEF(ContainerVT));
6446   else
6447     Ops.push_back(PassThru);
6448   Ops.push_back(BasePtr);
6449   Ops.push_back(Index);
6450   if (!IsUnmasked)
6451     Ops.push_back(Mask);
6452   Ops.push_back(VL);
6453   if (!IsUnmasked)
6454     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6455 
6456   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6457   SDValue Result =
6458       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6459   Chain = Result.getValue(1);
6460 
6461   if (VT.isFixedLengthVector())
6462     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6463 
6464   return DAG.getMergeValues({Result, Chain}, DL);
6465 }
6466 
6467 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6468 // matched to a RVV indexed store. The RVV indexed store instructions only
6469 // support the "unsigned unscaled" addressing mode; indices are implicitly
6470 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6471 // signed or scaled indexing is extended to the XLEN value type and scaled
6472 // accordingly.
6473 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6474                                                 SelectionDAG &DAG) const {
6475   SDLoc DL(Op);
6476   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6477   EVT MemVT = MemSD->getMemoryVT();
6478   MachineMemOperand *MMO = MemSD->getMemOperand();
6479   SDValue Chain = MemSD->getChain();
6480   SDValue BasePtr = MemSD->getBasePtr();
6481 
6482   bool IsTruncatingStore = false;
6483   SDValue Index, Mask, Val, VL;
6484 
6485   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6486     Index = VPSN->getIndex();
6487     Mask = VPSN->getMask();
6488     Val = VPSN->getValue();
6489     VL = VPSN->getVectorLength();
6490     // VP doesn't support truncating stores.
6491     IsTruncatingStore = false;
6492   } else {
6493     // Else it must be a MSCATTER.
6494     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6495     Index = MSN->getIndex();
6496     Mask = MSN->getMask();
6497     Val = MSN->getValue();
6498     IsTruncatingStore = MSN->isTruncatingStore();
6499   }
6500 
6501   MVT VT = Val.getSimpleValueType();
6502   MVT IndexVT = Index.getSimpleValueType();
6503   MVT XLenVT = Subtarget.getXLenVT();
6504 
6505   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6506          "Unexpected VTs!");
6507   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6508   // Targets have to explicitly opt-in for extending vector loads and
6509   // truncating vector stores.
6510   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6511   (void)IsTruncatingStore;
6512 
6513   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6514   // the selection of the masked intrinsics doesn't do this for us.
6515   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6516 
6517   MVT ContainerVT = VT;
6518   if (VT.isFixedLengthVector()) {
6519     // We need to use the larger of the value and index type to determine the
6520     // scalable type to use so we don't increase LMUL for any operand/result.
6521     if (VT.bitsGE(IndexVT)) {
6522       ContainerVT = getContainerForFixedLengthVector(VT);
6523       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6524                                  ContainerVT.getVectorElementCount());
6525     } else {
6526       IndexVT = getContainerForFixedLengthVector(IndexVT);
6527       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6528                                      IndexVT.getVectorElementCount());
6529     }
6530 
6531     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6532     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6533 
6534     if (!IsUnmasked) {
6535       MVT MaskVT = getMaskTypeFor(ContainerVT);
6536       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6537     }
6538   }
6539 
6540   if (!VL)
6541     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6542 
6543   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6544     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6545     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6546                                    VL);
6547     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6548                         TrueMask, VL);
6549   }
6550 
6551   unsigned IntID =
6552       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6553   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6554   Ops.push_back(Val);
6555   Ops.push_back(BasePtr);
6556   Ops.push_back(Index);
6557   if (!IsUnmasked)
6558     Ops.push_back(Mask);
6559   Ops.push_back(VL);
6560 
6561   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6562                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6563 }
6564 
6565 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6566                                                SelectionDAG &DAG) const {
6567   const MVT XLenVT = Subtarget.getXLenVT();
6568   SDLoc DL(Op);
6569   SDValue Chain = Op->getOperand(0);
6570   SDValue SysRegNo = DAG.getTargetConstant(
6571       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6572   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6573   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6574 
6575   // Encoding used for rounding mode in RISCV differs from that used in
6576   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6577   // table, which consists of a sequence of 4-bit fields, each representing
6578   // corresponding FLT_ROUNDS mode.
6579   static const int Table =
6580       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6581       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6582       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6583       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6584       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6585 
6586   SDValue Shift =
6587       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6588   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6589                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6590   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6591                                DAG.getConstant(7, DL, XLenVT));
6592 
6593   return DAG.getMergeValues({Masked, Chain}, DL);
6594 }
6595 
6596 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6597                                                SelectionDAG &DAG) const {
6598   const MVT XLenVT = Subtarget.getXLenVT();
6599   SDLoc DL(Op);
6600   SDValue Chain = Op->getOperand(0);
6601   SDValue RMValue = Op->getOperand(1);
6602   SDValue SysRegNo = DAG.getTargetConstant(
6603       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6604 
6605   // Encoding used for rounding mode in RISCV differs from that used in
6606   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6607   // a table, which consists of a sequence of 4-bit fields, each representing
6608   // corresponding RISCV mode.
6609   static const unsigned Table =
6610       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6611       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6612       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6613       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6614       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6615 
6616   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6617                               DAG.getConstant(2, DL, XLenVT));
6618   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6619                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6620   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6621                         DAG.getConstant(0x7, DL, XLenVT));
6622   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6623                      RMValue);
6624 }
6625 
6626 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6627   switch (IntNo) {
6628   default:
6629     llvm_unreachable("Unexpected Intrinsic");
6630   case Intrinsic::riscv_bcompress:
6631     return RISCVISD::BCOMPRESSW;
6632   case Intrinsic::riscv_bdecompress:
6633     return RISCVISD::BDECOMPRESSW;
6634   case Intrinsic::riscv_bfp:
6635     return RISCVISD::BFPW;
6636   case Intrinsic::riscv_fsl:
6637     return RISCVISD::FSLW;
6638   case Intrinsic::riscv_fsr:
6639     return RISCVISD::FSRW;
6640   }
6641 }
6642 
6643 // Converts the given intrinsic to a i64 operation with any extension.
6644 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6645                                          unsigned IntNo) {
6646   SDLoc DL(N);
6647   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6648   // Deal with the Instruction Operands
6649   SmallVector<SDValue, 3> NewOps;
6650   for (SDValue Op : drop_begin(N->ops()))
6651     // Promote the operand to i64 type
6652     NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
6653   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
6654   // ReplaceNodeResults requires we maintain the same type for the return value.
6655   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6656 }
6657 
6658 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6659 // form of the given Opcode.
6660 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6661   switch (Opcode) {
6662   default:
6663     llvm_unreachable("Unexpected opcode");
6664   case ISD::SHL:
6665     return RISCVISD::SLLW;
6666   case ISD::SRA:
6667     return RISCVISD::SRAW;
6668   case ISD::SRL:
6669     return RISCVISD::SRLW;
6670   case ISD::SDIV:
6671     return RISCVISD::DIVW;
6672   case ISD::UDIV:
6673     return RISCVISD::DIVUW;
6674   case ISD::UREM:
6675     return RISCVISD::REMUW;
6676   case ISD::ROTL:
6677     return RISCVISD::ROLW;
6678   case ISD::ROTR:
6679     return RISCVISD::RORW;
6680   }
6681 }
6682 
6683 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6684 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6685 // otherwise be promoted to i64, making it difficult to select the
6686 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6687 // type i8/i16/i32 is lost.
6688 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6689                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6690   SDLoc DL(N);
6691   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6692   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6693   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6694   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6695   // ReplaceNodeResults requires we maintain the same type for the return value.
6696   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6697 }
6698 
6699 // Converts the given 32-bit operation to a i64 operation with signed extension
6700 // semantic to reduce the signed extension instructions.
6701 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6702   SDLoc DL(N);
6703   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6704   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6705   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6706   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6707                                DAG.getValueType(MVT::i32));
6708   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6709 }
6710 
6711 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6712                                              SmallVectorImpl<SDValue> &Results,
6713                                              SelectionDAG &DAG) const {
6714   SDLoc DL(N);
6715   switch (N->getOpcode()) {
6716   default:
6717     llvm_unreachable("Don't know how to custom type legalize this operation!");
6718   case ISD::STRICT_FP_TO_SINT:
6719   case ISD::STRICT_FP_TO_UINT:
6720   case ISD::FP_TO_SINT:
6721   case ISD::FP_TO_UINT: {
6722     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6723            "Unexpected custom legalisation");
6724     bool IsStrict = N->isStrictFPOpcode();
6725     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6726                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6727     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6728     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6729         TargetLowering::TypeSoftenFloat) {
6730       if (!isTypeLegal(Op0.getValueType()))
6731         return;
6732       if (IsStrict) {
6733         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6734                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6735         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6736         SDValue Res = DAG.getNode(
6737             Opc, DL, VTs, N->getOperand(0), Op0,
6738             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6739         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6740         Results.push_back(Res.getValue(1));
6741         return;
6742       }
6743       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6744       SDValue Res =
6745           DAG.getNode(Opc, DL, MVT::i64, Op0,
6746                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6747       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6748       return;
6749     }
6750     // If the FP type needs to be softened, emit a library call using the 'si'
6751     // version. If we left it to default legalization we'd end up with 'di'. If
6752     // the FP type doesn't need to be softened just let generic type
6753     // legalization promote the result type.
6754     RTLIB::Libcall LC;
6755     if (IsSigned)
6756       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6757     else
6758       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6759     MakeLibCallOptions CallOptions;
6760     EVT OpVT = Op0.getValueType();
6761     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6762     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6763     SDValue Result;
6764     std::tie(Result, Chain) =
6765         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6766     Results.push_back(Result);
6767     if (IsStrict)
6768       Results.push_back(Chain);
6769     break;
6770   }
6771   case ISD::READCYCLECOUNTER: {
6772     assert(!Subtarget.is64Bit() &&
6773            "READCYCLECOUNTER only has custom type legalization on riscv32");
6774 
6775     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6776     SDValue RCW =
6777         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6778 
6779     Results.push_back(
6780         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6781     Results.push_back(RCW.getValue(2));
6782     break;
6783   }
6784   case ISD::MUL: {
6785     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6786     unsigned XLen = Subtarget.getXLen();
6787     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6788     if (Size > XLen) {
6789       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6790       SDValue LHS = N->getOperand(0);
6791       SDValue RHS = N->getOperand(1);
6792       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6793 
6794       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6795       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6796       // We need exactly one side to be unsigned.
6797       if (LHSIsU == RHSIsU)
6798         return;
6799 
6800       auto MakeMULPair = [&](SDValue S, SDValue U) {
6801         MVT XLenVT = Subtarget.getXLenVT();
6802         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6803         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6804         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6805         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6806         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6807       };
6808 
6809       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6810       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6811 
6812       // The other operand should be signed, but still prefer MULH when
6813       // possible.
6814       if (RHSIsU && LHSIsS && !RHSIsS)
6815         Results.push_back(MakeMULPair(LHS, RHS));
6816       else if (LHSIsU && RHSIsS && !LHSIsS)
6817         Results.push_back(MakeMULPair(RHS, LHS));
6818 
6819       return;
6820     }
6821     LLVM_FALLTHROUGH;
6822   }
6823   case ISD::ADD:
6824   case ISD::SUB:
6825     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6826            "Unexpected custom legalisation");
6827     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6828     break;
6829   case ISD::SHL:
6830   case ISD::SRA:
6831   case ISD::SRL:
6832     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6833            "Unexpected custom legalisation");
6834     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6835       // If we can use a BSET instruction, allow default promotion to apply.
6836       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
6837           isOneConstant(N->getOperand(0)))
6838         break;
6839       Results.push_back(customLegalizeToWOp(N, DAG));
6840       break;
6841     }
6842 
6843     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6844     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6845     // shift amount.
6846     if (N->getOpcode() == ISD::SHL) {
6847       SDLoc DL(N);
6848       SDValue NewOp0 =
6849           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6850       SDValue NewOp1 =
6851           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6852       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6853       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6854                                    DAG.getValueType(MVT::i32));
6855       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6856     }
6857 
6858     break;
6859   case ISD::ROTL:
6860   case ISD::ROTR:
6861     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6862            "Unexpected custom legalisation");
6863     Results.push_back(customLegalizeToWOp(N, DAG));
6864     break;
6865   case ISD::CTTZ:
6866   case ISD::CTTZ_ZERO_UNDEF:
6867   case ISD::CTLZ:
6868   case ISD::CTLZ_ZERO_UNDEF: {
6869     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6870            "Unexpected custom legalisation");
6871 
6872     SDValue NewOp0 =
6873         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6874     bool IsCTZ =
6875         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6876     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6877     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6878     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6879     return;
6880   }
6881   case ISD::SDIV:
6882   case ISD::UDIV:
6883   case ISD::UREM: {
6884     MVT VT = N->getSimpleValueType(0);
6885     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6886            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6887            "Unexpected custom legalisation");
6888     // Don't promote division/remainder by constant since we should expand those
6889     // to multiply by magic constant.
6890     // FIXME: What if the expansion is disabled for minsize.
6891     if (N->getOperand(1).getOpcode() == ISD::Constant)
6892       return;
6893 
6894     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6895     // the upper 32 bits. For other types we need to sign or zero extend
6896     // based on the opcode.
6897     unsigned ExtOpc = ISD::ANY_EXTEND;
6898     if (VT != MVT::i32)
6899       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6900                                            : ISD::ZERO_EXTEND;
6901 
6902     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6903     break;
6904   }
6905   case ISD::UADDO:
6906   case ISD::USUBO: {
6907     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6908            "Unexpected custom legalisation");
6909     bool IsAdd = N->getOpcode() == ISD::UADDO;
6910     // Create an ADDW or SUBW.
6911     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6912     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6913     SDValue Res =
6914         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6915     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6916                       DAG.getValueType(MVT::i32));
6917 
6918     SDValue Overflow;
6919     if (IsAdd && isOneConstant(RHS)) {
6920       // Special case uaddo X, 1 overflowed if the addition result is 0.
6921       // The general case (X + C) < C is not necessarily beneficial. Although we
6922       // reduce the live range of X, we may introduce the materialization of
6923       // constant C, especially when the setcc result is used by branch. We have
6924       // no compare with constant and branch instructions.
6925       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6926                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6927     } else {
6928       // Sign extend the LHS and perform an unsigned compare with the ADDW
6929       // result. Since the inputs are sign extended from i32, this is equivalent
6930       // to comparing the lower 32 bits.
6931       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6932       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6933                               IsAdd ? ISD::SETULT : ISD::SETUGT);
6934     }
6935 
6936     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6937     Results.push_back(Overflow);
6938     return;
6939   }
6940   case ISD::UADDSAT:
6941   case ISD::USUBSAT: {
6942     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6943            "Unexpected custom legalisation");
6944     if (Subtarget.hasStdExtZbb()) {
6945       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6946       // sign extend allows overflow of the lower 32 bits to be detected on
6947       // the promoted size.
6948       SDValue LHS =
6949           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6950       SDValue RHS =
6951           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6952       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6953       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6954       return;
6955     }
6956 
6957     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6958     // promotion for UADDO/USUBO.
6959     Results.push_back(expandAddSubSat(N, DAG));
6960     return;
6961   }
6962   case ISD::ABS: {
6963     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6964            "Unexpected custom legalisation");
6965           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6966 
6967     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6968 
6969     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6970 
6971     // Freeze the source so we can increase it's use count.
6972     Src = DAG.getFreeze(Src);
6973 
6974     // Copy sign bit to all bits using the sraiw pattern.
6975     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6976                                    DAG.getValueType(MVT::i32));
6977     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6978                            DAG.getConstant(31, DL, MVT::i64));
6979 
6980     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6981     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6982 
6983     // NOTE: The result is only required to be anyextended, but sext is
6984     // consistent with type legalization of sub.
6985     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6986                          DAG.getValueType(MVT::i32));
6987     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6988     return;
6989   }
6990   case ISD::BITCAST: {
6991     EVT VT = N->getValueType(0);
6992     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6993     SDValue Op0 = N->getOperand(0);
6994     EVT Op0VT = Op0.getValueType();
6995     MVT XLenVT = Subtarget.getXLenVT();
6996     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6997       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6998       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6999     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
7000                Subtarget.hasStdExtF()) {
7001       SDValue FPConv =
7002           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
7003       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
7004     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
7005                isTypeLegal(Op0VT)) {
7006       // Custom-legalize bitcasts from fixed-length vector types to illegal
7007       // scalar types in order to improve codegen. Bitcast the vector to a
7008       // one-element vector type whose element type is the same as the result
7009       // type, and extract the first element.
7010       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
7011       if (isTypeLegal(BVT)) {
7012         SDValue BVec = DAG.getBitcast(BVT, Op0);
7013         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
7014                                       DAG.getConstant(0, DL, XLenVT)));
7015       }
7016     }
7017     break;
7018   }
7019   case RISCVISD::GREV:
7020   case RISCVISD::GORC:
7021   case RISCVISD::SHFL: {
7022     MVT VT = N->getSimpleValueType(0);
7023     MVT XLenVT = Subtarget.getXLenVT();
7024     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
7025            "Unexpected custom legalisation");
7026     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
7027     assert((Subtarget.hasStdExtZbp() ||
7028             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
7029              N->getConstantOperandVal(1) == 7)) &&
7030            "Unexpected extension");
7031     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7032     SDValue NewOp1 =
7033         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7034     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7035     // ReplaceNodeResults requires we maintain the same type for the return
7036     // value.
7037     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7038     break;
7039   }
7040   case ISD::BSWAP:
7041   case ISD::BITREVERSE: {
7042     MVT VT = N->getSimpleValueType(0);
7043     MVT XLenVT = Subtarget.getXLenVT();
7044     assert((VT == MVT::i8 || VT == MVT::i16 ||
7045             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7046            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7047     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7048     unsigned Imm = VT.getSizeInBits() - 1;
7049     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7050     if (N->getOpcode() == ISD::BSWAP)
7051       Imm &= ~0x7U;
7052     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7053                                 DAG.getConstant(Imm, DL, XLenVT));
7054     // ReplaceNodeResults requires we maintain the same type for the return
7055     // value.
7056     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7057     break;
7058   }
7059   case ISD::FSHL:
7060   case ISD::FSHR: {
7061     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7062            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7063     SDValue NewOp0 =
7064         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7065     SDValue NewOp1 =
7066         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7067     SDValue NewShAmt =
7068         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7069     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7070     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7071     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7072                            DAG.getConstant(0x1f, DL, MVT::i64));
7073     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
7074     // instruction use different orders. fshl will return its first operand for
7075     // shift of zero, fshr will return its second operand. fsl and fsr both
7076     // return rs1 so the ISD nodes need to have different operand orders.
7077     // Shift amount is in rs2.
7078     unsigned Opc = RISCVISD::FSLW;
7079     if (N->getOpcode() == ISD::FSHR) {
7080       std::swap(NewOp0, NewOp1);
7081       Opc = RISCVISD::FSRW;
7082     }
7083     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7084     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7085     break;
7086   }
7087   case ISD::EXTRACT_VECTOR_ELT: {
7088     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
7089     // type is illegal (currently only vXi64 RV32).
7090     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
7091     // transferred to the destination register. We issue two of these from the
7092     // upper- and lower- halves of the SEW-bit vector element, slid down to the
7093     // first element.
7094     SDValue Vec = N->getOperand(0);
7095     SDValue Idx = N->getOperand(1);
7096 
7097     // The vector type hasn't been legalized yet so we can't issue target
7098     // specific nodes if it needs legalization.
7099     // FIXME: We would manually legalize if it's important.
7100     if (!isTypeLegal(Vec.getValueType()))
7101       return;
7102 
7103     MVT VecVT = Vec.getSimpleValueType();
7104 
7105     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7106            VecVT.getVectorElementType() == MVT::i64 &&
7107            "Unexpected EXTRACT_VECTOR_ELT legalization");
7108 
7109     // If this is a fixed vector, we need to convert it to a scalable vector.
7110     MVT ContainerVT = VecVT;
7111     if (VecVT.isFixedLengthVector()) {
7112       ContainerVT = getContainerForFixedLengthVector(VecVT);
7113       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7114     }
7115 
7116     MVT XLenVT = Subtarget.getXLenVT();
7117 
7118     // Use a VL of 1 to avoid processing more elements than we need.
7119     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7120     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
7121 
7122     // Unless the index is known to be 0, we must slide the vector down to get
7123     // the desired element into index 0.
7124     if (!isNullConstant(Idx)) {
7125       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7126                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7127     }
7128 
7129     // Extract the lower XLEN bits of the correct vector element.
7130     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7131 
7132     // To extract the upper XLEN bits of the vector element, shift the first
7133     // element right by 32 bits and re-extract the lower XLEN bits.
7134     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7135                                      DAG.getUNDEF(ContainerVT),
7136                                      DAG.getConstant(32, DL, XLenVT), VL);
7137     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7138                                  ThirtyTwoV, Mask, VL);
7139 
7140     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7141 
7142     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7143     break;
7144   }
7145   case ISD::INTRINSIC_WO_CHAIN: {
7146     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7147     switch (IntNo) {
7148     default:
7149       llvm_unreachable(
7150           "Don't know how to custom type legalize this intrinsic!");
7151     case Intrinsic::riscv_grev:
7152     case Intrinsic::riscv_gorc: {
7153       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7154              "Unexpected custom legalisation");
7155       SDValue NewOp1 =
7156           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7157       SDValue NewOp2 =
7158           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7159       unsigned Opc =
7160           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
7161       // If the control is a constant, promote the node by clearing any extra
7162       // bits bits in the control. isel will form greviw/gorciw if the result is
7163       // sign extended.
7164       if (isa<ConstantSDNode>(NewOp2)) {
7165         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7166                              DAG.getConstant(0x1f, DL, MVT::i64));
7167         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7168       }
7169       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7170       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7171       break;
7172     }
7173     case Intrinsic::riscv_bcompress:
7174     case Intrinsic::riscv_bdecompress:
7175     case Intrinsic::riscv_bfp:
7176     case Intrinsic::riscv_fsl:
7177     case Intrinsic::riscv_fsr: {
7178       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7179              "Unexpected custom legalisation");
7180       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7181       break;
7182     }
7183     case Intrinsic::riscv_orc_b: {
7184       // Lower to the GORCI encoding for orc.b with the operand extended.
7185       SDValue NewOp =
7186           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7187       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7188                                 DAG.getConstant(7, DL, MVT::i64));
7189       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7190       return;
7191     }
7192     case Intrinsic::riscv_shfl:
7193     case Intrinsic::riscv_unshfl: {
7194       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7195              "Unexpected custom legalisation");
7196       SDValue NewOp1 =
7197           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7198       SDValue NewOp2 =
7199           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7200       unsigned Opc =
7201           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7202       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7203       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7204       // will be shuffled the same way as the lower 32 bit half, but the two
7205       // halves won't cross.
7206       if (isa<ConstantSDNode>(NewOp2)) {
7207         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7208                              DAG.getConstant(0xf, DL, MVT::i64));
7209         Opc =
7210             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7211       }
7212       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7213       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7214       break;
7215     }
7216     case Intrinsic::riscv_vmv_x_s: {
7217       EVT VT = N->getValueType(0);
7218       MVT XLenVT = Subtarget.getXLenVT();
7219       if (VT.bitsLT(XLenVT)) {
7220         // Simple case just extract using vmv.x.s and truncate.
7221         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7222                                       Subtarget.getXLenVT(), N->getOperand(1));
7223         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7224         return;
7225       }
7226 
7227       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7228              "Unexpected custom legalization");
7229 
7230       // We need to do the move in two steps.
7231       SDValue Vec = N->getOperand(1);
7232       MVT VecVT = Vec.getSimpleValueType();
7233 
7234       // First extract the lower XLEN bits of the element.
7235       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7236 
7237       // To extract the upper XLEN bits of the vector element, shift the first
7238       // element right by 32 bits and re-extract the lower XLEN bits.
7239       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7240       SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
7241 
7242       SDValue ThirtyTwoV =
7243           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7244                       DAG.getConstant(32, DL, XLenVT), VL);
7245       SDValue LShr32 =
7246           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7247       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7248 
7249       Results.push_back(
7250           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7251       break;
7252     }
7253     }
7254     break;
7255   }
7256   case ISD::VECREDUCE_ADD:
7257   case ISD::VECREDUCE_AND:
7258   case ISD::VECREDUCE_OR:
7259   case ISD::VECREDUCE_XOR:
7260   case ISD::VECREDUCE_SMAX:
7261   case ISD::VECREDUCE_UMAX:
7262   case ISD::VECREDUCE_SMIN:
7263   case ISD::VECREDUCE_UMIN:
7264     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7265       Results.push_back(V);
7266     break;
7267   case ISD::VP_REDUCE_ADD:
7268   case ISD::VP_REDUCE_AND:
7269   case ISD::VP_REDUCE_OR:
7270   case ISD::VP_REDUCE_XOR:
7271   case ISD::VP_REDUCE_SMAX:
7272   case ISD::VP_REDUCE_UMAX:
7273   case ISD::VP_REDUCE_SMIN:
7274   case ISD::VP_REDUCE_UMIN:
7275     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7276       Results.push_back(V);
7277     break;
7278   case ISD::FLT_ROUNDS_: {
7279     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7280     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7281     Results.push_back(Res.getValue(0));
7282     Results.push_back(Res.getValue(1));
7283     break;
7284   }
7285   }
7286 }
7287 
7288 // A structure to hold one of the bit-manipulation patterns below. Together, a
7289 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7290 //   (or (and (shl x, 1), 0xAAAAAAAA),
7291 //       (and (srl x, 1), 0x55555555))
7292 struct RISCVBitmanipPat {
7293   SDValue Op;
7294   unsigned ShAmt;
7295   bool IsSHL;
7296 
7297   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7298     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7299   }
7300 };
7301 
7302 // Matches patterns of the form
7303 //   (and (shl x, C2), (C1 << C2))
7304 //   (and (srl x, C2), C1)
7305 //   (shl (and x, C1), C2)
7306 //   (srl (and x, (C1 << C2)), C2)
7307 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7308 // The expected masks for each shift amount are specified in BitmanipMasks where
7309 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7310 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7311 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7312 // XLen is 64.
7313 static Optional<RISCVBitmanipPat>
7314 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7315   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7316          "Unexpected number of masks");
7317   Optional<uint64_t> Mask;
7318   // Optionally consume a mask around the shift operation.
7319   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7320     Mask = Op.getConstantOperandVal(1);
7321     Op = Op.getOperand(0);
7322   }
7323   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7324     return None;
7325   bool IsSHL = Op.getOpcode() == ISD::SHL;
7326 
7327   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7328     return None;
7329   uint64_t ShAmt = Op.getConstantOperandVal(1);
7330 
7331   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7332   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7333     return None;
7334   // If we don't have enough masks for 64 bit, then we must be trying to
7335   // match SHFL so we're only allowed to shift 1/4 of the width.
7336   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7337     return None;
7338 
7339   SDValue Src = Op.getOperand(0);
7340 
7341   // The expected mask is shifted left when the AND is found around SHL
7342   // patterns.
7343   //   ((x >> 1) & 0x55555555)
7344   //   ((x << 1) & 0xAAAAAAAA)
7345   bool SHLExpMask = IsSHL;
7346 
7347   if (!Mask) {
7348     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7349     // the mask is all ones: consume that now.
7350     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7351       Mask = Src.getConstantOperandVal(1);
7352       Src = Src.getOperand(0);
7353       // The expected mask is now in fact shifted left for SRL, so reverse the
7354       // decision.
7355       //   ((x & 0xAAAAAAAA) >> 1)
7356       //   ((x & 0x55555555) << 1)
7357       SHLExpMask = !SHLExpMask;
7358     } else {
7359       // Use a default shifted mask of all-ones if there's no AND, truncated
7360       // down to the expected width. This simplifies the logic later on.
7361       Mask = maskTrailingOnes<uint64_t>(Width);
7362       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7363     }
7364   }
7365 
7366   unsigned MaskIdx = Log2_32(ShAmt);
7367   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7368 
7369   if (SHLExpMask)
7370     ExpMask <<= ShAmt;
7371 
7372   if (Mask != ExpMask)
7373     return None;
7374 
7375   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7376 }
7377 
7378 // Matches any of the following bit-manipulation patterns:
7379 //   (and (shl x, 1), (0x55555555 << 1))
7380 //   (and (srl x, 1), 0x55555555)
7381 //   (shl (and x, 0x55555555), 1)
7382 //   (srl (and x, (0x55555555 << 1)), 1)
7383 // where the shift amount and mask may vary thus:
7384 //   [1]  = 0x55555555 / 0xAAAAAAAA
7385 //   [2]  = 0x33333333 / 0xCCCCCCCC
7386 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7387 //   [8]  = 0x00FF00FF / 0xFF00FF00
7388 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7389 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7390 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7391   // These are the unshifted masks which we use to match bit-manipulation
7392   // patterns. They may be shifted left in certain circumstances.
7393   static const uint64_t BitmanipMasks[] = {
7394       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7395       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7396 
7397   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7398 }
7399 
7400 // Try to fold (<bop> x, (reduction.<bop> vec, start))
7401 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
7402   auto BinOpToRVVReduce = [](unsigned Opc) {
7403     switch (Opc) {
7404     default:
7405       llvm_unreachable("Unhandled binary to transfrom reduction");
7406     case ISD::ADD:
7407       return RISCVISD::VECREDUCE_ADD_VL;
7408     case ISD::UMAX:
7409       return RISCVISD::VECREDUCE_UMAX_VL;
7410     case ISD::SMAX:
7411       return RISCVISD::VECREDUCE_SMAX_VL;
7412     case ISD::UMIN:
7413       return RISCVISD::VECREDUCE_UMIN_VL;
7414     case ISD::SMIN:
7415       return RISCVISD::VECREDUCE_SMIN_VL;
7416     case ISD::AND:
7417       return RISCVISD::VECREDUCE_AND_VL;
7418     case ISD::OR:
7419       return RISCVISD::VECREDUCE_OR_VL;
7420     case ISD::XOR:
7421       return RISCVISD::VECREDUCE_XOR_VL;
7422     case ISD::FADD:
7423       return RISCVISD::VECREDUCE_FADD_VL;
7424     case ISD::FMAXNUM:
7425       return RISCVISD::VECREDUCE_FMAX_VL;
7426     case ISD::FMINNUM:
7427       return RISCVISD::VECREDUCE_FMIN_VL;
7428     }
7429   };
7430 
7431   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
7432     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7433            isNullConstant(V.getOperand(1)) &&
7434            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
7435   };
7436 
7437   unsigned Opc = N->getOpcode();
7438   unsigned ReduceIdx;
7439   if (IsReduction(N->getOperand(0), Opc))
7440     ReduceIdx = 0;
7441   else if (IsReduction(N->getOperand(1), Opc))
7442     ReduceIdx = 1;
7443   else
7444     return SDValue();
7445 
7446   // Skip if FADD disallows reassociation but the combiner needs.
7447   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
7448     return SDValue();
7449 
7450   SDValue Extract = N->getOperand(ReduceIdx);
7451   SDValue Reduce = Extract.getOperand(0);
7452   if (!Reduce.hasOneUse())
7453     return SDValue();
7454 
7455   SDValue ScalarV = Reduce.getOperand(2);
7456 
7457   // Make sure that ScalarV is a splat with VL=1.
7458   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
7459       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
7460       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
7461     return SDValue();
7462 
7463   if (!isOneConstant(ScalarV.getOperand(2)))
7464     return SDValue();
7465 
7466   // TODO: Deal with value other than neutral element.
7467   auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
7468     if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
7469         isNullFPConstant(V))
7470       return true;
7471     return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
7472                                  N->getFlags()) == V;
7473   };
7474 
7475   // Check the scalar of ScalarV is neutral element
7476   if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
7477     return SDValue();
7478 
7479   if (!ScalarV.hasOneUse())
7480     return SDValue();
7481 
7482   EVT SplatVT = ScalarV.getValueType();
7483   SDValue NewStart = N->getOperand(1 - ReduceIdx);
7484   unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
7485   if (SplatVT.isInteger()) {
7486     auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
7487     if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
7488       SplatOpc = RISCVISD::VMV_S_X_VL;
7489     else
7490       SplatOpc = RISCVISD::VMV_V_X_VL;
7491   }
7492 
7493   SDValue NewScalarV =
7494       DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
7495                   ScalarV.getOperand(2));
7496   SDValue NewReduce =
7497       DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
7498                   Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
7499                   Reduce.getOperand(3), Reduce.getOperand(4));
7500   return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
7501                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
7502 }
7503 
7504 // Match the following pattern as a GREVI(W) operation
7505 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7506 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7507                                const RISCVSubtarget &Subtarget) {
7508   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7509   EVT VT = Op.getValueType();
7510 
7511   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7512     auto LHS = matchGREVIPat(Op.getOperand(0));
7513     auto RHS = matchGREVIPat(Op.getOperand(1));
7514     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7515       SDLoc DL(Op);
7516       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7517                          DAG.getConstant(LHS->ShAmt, DL, VT));
7518     }
7519   }
7520   return SDValue();
7521 }
7522 
7523 // Matches any the following pattern as a GORCI(W) operation
7524 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7525 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7526 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7527 // Note that with the variant of 3.,
7528 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7529 // the inner pattern will first be matched as GREVI and then the outer
7530 // pattern will be matched to GORC via the first rule above.
7531 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7532 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7533                                const RISCVSubtarget &Subtarget) {
7534   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7535   EVT VT = Op.getValueType();
7536 
7537   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7538     SDLoc DL(Op);
7539     SDValue Op0 = Op.getOperand(0);
7540     SDValue Op1 = Op.getOperand(1);
7541 
7542     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7543       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7544           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7545           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7546         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7547       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7548       if ((Reverse.getOpcode() == ISD::ROTL ||
7549            Reverse.getOpcode() == ISD::ROTR) &&
7550           Reverse.getOperand(0) == X &&
7551           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7552         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7553         if (RotAmt == (VT.getSizeInBits() / 2))
7554           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7555                              DAG.getConstant(RotAmt, DL, VT));
7556       }
7557       return SDValue();
7558     };
7559 
7560     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7561     if (SDValue V = MatchOROfReverse(Op0, Op1))
7562       return V;
7563     if (SDValue V = MatchOROfReverse(Op1, Op0))
7564       return V;
7565 
7566     // OR is commutable so canonicalize its OR operand to the left
7567     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7568       std::swap(Op0, Op1);
7569     if (Op0.getOpcode() != ISD::OR)
7570       return SDValue();
7571     SDValue OrOp0 = Op0.getOperand(0);
7572     SDValue OrOp1 = Op0.getOperand(1);
7573     auto LHS = matchGREVIPat(OrOp0);
7574     // OR is commutable so swap the operands and try again: x might have been
7575     // on the left
7576     if (!LHS) {
7577       std::swap(OrOp0, OrOp1);
7578       LHS = matchGREVIPat(OrOp0);
7579     }
7580     auto RHS = matchGREVIPat(Op1);
7581     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7582       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7583                          DAG.getConstant(LHS->ShAmt, DL, VT));
7584     }
7585   }
7586   return SDValue();
7587 }
7588 
7589 // Matches any of the following bit-manipulation patterns:
7590 //   (and (shl x, 1), (0x22222222 << 1))
7591 //   (and (srl x, 1), 0x22222222)
7592 //   (shl (and x, 0x22222222), 1)
7593 //   (srl (and x, (0x22222222 << 1)), 1)
7594 // where the shift amount and mask may vary thus:
7595 //   [1]  = 0x22222222 / 0x44444444
7596 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7597 //   [4]  = 0x00F000F0 / 0x0F000F00
7598 //   [8]  = 0x0000FF00 / 0x00FF0000
7599 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7600 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7601   // These are the unshifted masks which we use to match bit-manipulation
7602   // patterns. They may be shifted left in certain circumstances.
7603   static const uint64_t BitmanipMasks[] = {
7604       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7605       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7606 
7607   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7608 }
7609 
7610 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7611 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7612                                const RISCVSubtarget &Subtarget) {
7613   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7614   EVT VT = Op.getValueType();
7615 
7616   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7617     return SDValue();
7618 
7619   SDValue Op0 = Op.getOperand(0);
7620   SDValue Op1 = Op.getOperand(1);
7621 
7622   // Or is commutable so canonicalize the second OR to the LHS.
7623   if (Op0.getOpcode() != ISD::OR)
7624     std::swap(Op0, Op1);
7625   if (Op0.getOpcode() != ISD::OR)
7626     return SDValue();
7627 
7628   // We found an inner OR, so our operands are the operands of the inner OR
7629   // and the other operand of the outer OR.
7630   SDValue A = Op0.getOperand(0);
7631   SDValue B = Op0.getOperand(1);
7632   SDValue C = Op1;
7633 
7634   auto Match1 = matchSHFLPat(A);
7635   auto Match2 = matchSHFLPat(B);
7636 
7637   // If neither matched, we failed.
7638   if (!Match1 && !Match2)
7639     return SDValue();
7640 
7641   // We had at least one match. if one failed, try the remaining C operand.
7642   if (!Match1) {
7643     std::swap(A, C);
7644     Match1 = matchSHFLPat(A);
7645     if (!Match1)
7646       return SDValue();
7647   } else if (!Match2) {
7648     std::swap(B, C);
7649     Match2 = matchSHFLPat(B);
7650     if (!Match2)
7651       return SDValue();
7652   }
7653   assert(Match1 && Match2);
7654 
7655   // Make sure our matches pair up.
7656   if (!Match1->formsPairWith(*Match2))
7657     return SDValue();
7658 
7659   // All the remains is to make sure C is an AND with the same input, that masks
7660   // out the bits that are being shuffled.
7661   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7662       C.getOperand(0) != Match1->Op)
7663     return SDValue();
7664 
7665   uint64_t Mask = C.getConstantOperandVal(1);
7666 
7667   static const uint64_t BitmanipMasks[] = {
7668       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7669       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7670   };
7671 
7672   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7673   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7674   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7675 
7676   if (Mask != ExpMask)
7677     return SDValue();
7678 
7679   SDLoc DL(Op);
7680   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7681                      DAG.getConstant(Match1->ShAmt, DL, VT));
7682 }
7683 
7684 // Optimize (add (shl x, c0), (shl y, c1)) ->
7685 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7686 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7687                                   const RISCVSubtarget &Subtarget) {
7688   // Perform this optimization only in the zba extension.
7689   if (!Subtarget.hasStdExtZba())
7690     return SDValue();
7691 
7692   // Skip for vector types and larger types.
7693   EVT VT = N->getValueType(0);
7694   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7695     return SDValue();
7696 
7697   // The two operand nodes must be SHL and have no other use.
7698   SDValue N0 = N->getOperand(0);
7699   SDValue N1 = N->getOperand(1);
7700   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7701       !N0->hasOneUse() || !N1->hasOneUse())
7702     return SDValue();
7703 
7704   // Check c0 and c1.
7705   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7706   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7707   if (!N0C || !N1C)
7708     return SDValue();
7709   int64_t C0 = N0C->getSExtValue();
7710   int64_t C1 = N1C->getSExtValue();
7711   if (C0 <= 0 || C1 <= 0)
7712     return SDValue();
7713 
7714   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7715   int64_t Bits = std::min(C0, C1);
7716   int64_t Diff = std::abs(C0 - C1);
7717   if (Diff != 1 && Diff != 2 && Diff != 3)
7718     return SDValue();
7719 
7720   // Build nodes.
7721   SDLoc DL(N);
7722   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7723   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7724   SDValue NA0 =
7725       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7726   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7727   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7728 }
7729 
7730 // Combine
7731 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7732 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7733 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7734 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7735 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7736 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7737 // The grev patterns represents BSWAP.
7738 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7739 // off the grev.
7740 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7741                                           const RISCVSubtarget &Subtarget) {
7742   bool IsWInstruction =
7743       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7744   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7745           IsWInstruction) &&
7746          "Unexpected opcode!");
7747   SDValue Src = N->getOperand(0);
7748   EVT VT = N->getValueType(0);
7749   SDLoc DL(N);
7750 
7751   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7752     return SDValue();
7753 
7754   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7755       !isa<ConstantSDNode>(Src.getOperand(1)))
7756     return SDValue();
7757 
7758   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7759   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7760 
7761   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7762   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7763   unsigned ShAmt1 = N->getConstantOperandVal(1);
7764   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7765   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7766     return SDValue();
7767 
7768   Src = Src.getOperand(0);
7769 
7770   // Toggle bit the MSB of the shift.
7771   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7772   if (CombinedShAmt == 0)
7773     return Src;
7774 
7775   SDValue Res = DAG.getNode(
7776       RISCVISD::GREV, DL, VT, Src,
7777       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7778   if (!IsWInstruction)
7779     return Res;
7780 
7781   // Sign extend the result to match the behavior of the rotate. This will be
7782   // selected to GREVIW in isel.
7783   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7784                      DAG.getValueType(MVT::i32));
7785 }
7786 
7787 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7788 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7789 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7790 // not undo itself, but they are redundant.
7791 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7792   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7793   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7794   SDValue Src = N->getOperand(0);
7795 
7796   if (Src.getOpcode() != N->getOpcode())
7797     return SDValue();
7798 
7799   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7800       !isa<ConstantSDNode>(Src.getOperand(1)))
7801     return SDValue();
7802 
7803   unsigned ShAmt1 = N->getConstantOperandVal(1);
7804   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7805   Src = Src.getOperand(0);
7806 
7807   unsigned CombinedShAmt;
7808   if (IsGORC)
7809     CombinedShAmt = ShAmt1 | ShAmt2;
7810   else
7811     CombinedShAmt = ShAmt1 ^ ShAmt2;
7812 
7813   if (CombinedShAmt == 0)
7814     return Src;
7815 
7816   SDLoc DL(N);
7817   return DAG.getNode(
7818       N->getOpcode(), DL, N->getValueType(0), Src,
7819       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7820 }
7821 
7822 // Combine a constant select operand into its use:
7823 //
7824 // (and (select cond, -1, c), x)
7825 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7826 // (or  (select cond, 0, c), x)
7827 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7828 // (xor (select cond, 0, c), x)
7829 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7830 // (add (select cond, 0, c), x)
7831 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7832 // (sub x, (select cond, 0, c))
7833 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7834 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7835                                    SelectionDAG &DAG, bool AllOnes) {
7836   EVT VT = N->getValueType(0);
7837 
7838   // Skip vectors.
7839   if (VT.isVector())
7840     return SDValue();
7841 
7842   if ((Slct.getOpcode() != ISD::SELECT &&
7843        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7844       !Slct.hasOneUse())
7845     return SDValue();
7846 
7847   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7848     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7849   };
7850 
7851   bool SwapSelectOps;
7852   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7853   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7854   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7855   SDValue NonConstantVal;
7856   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7857     SwapSelectOps = false;
7858     NonConstantVal = FalseVal;
7859   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7860     SwapSelectOps = true;
7861     NonConstantVal = TrueVal;
7862   } else
7863     return SDValue();
7864 
7865   // Slct is now know to be the desired identity constant when CC is true.
7866   TrueVal = OtherOp;
7867   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7868   // Unless SwapSelectOps says the condition should be false.
7869   if (SwapSelectOps)
7870     std::swap(TrueVal, FalseVal);
7871 
7872   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7873     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7874                        {Slct.getOperand(0), Slct.getOperand(1),
7875                         Slct.getOperand(2), TrueVal, FalseVal});
7876 
7877   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7878                      {Slct.getOperand(0), TrueVal, FalseVal});
7879 }
7880 
7881 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7882 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7883                                               bool AllOnes) {
7884   SDValue N0 = N->getOperand(0);
7885   SDValue N1 = N->getOperand(1);
7886   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7887     return Result;
7888   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7889     return Result;
7890   return SDValue();
7891 }
7892 
7893 // Transform (add (mul x, c0), c1) ->
7894 //           (add (mul (add x, c1/c0), c0), c1%c0).
7895 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7896 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7897 // to an infinite loop in DAGCombine if transformed.
7898 // Or transform (add (mul x, c0), c1) ->
7899 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7900 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7901 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7902 // lead to an infinite loop in DAGCombine if transformed.
7903 // Or transform (add (mul x, c0), c1) ->
7904 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7905 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7906 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7907 // lead to an infinite loop in DAGCombine if transformed.
7908 // Or transform (add (mul x, c0), c1) ->
7909 //              (mul (add x, c1/c0), c0).
7910 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7911 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7912                                      const RISCVSubtarget &Subtarget) {
7913   // Skip for vector types and larger types.
7914   EVT VT = N->getValueType(0);
7915   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7916     return SDValue();
7917   // The first operand node must be a MUL and has no other use.
7918   SDValue N0 = N->getOperand(0);
7919   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7920     return SDValue();
7921   // Check if c0 and c1 match above conditions.
7922   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7923   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7924   if (!N0C || !N1C)
7925     return SDValue();
7926   // If N0C has multiple uses it's possible one of the cases in
7927   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7928   // in an infinite loop.
7929   if (!N0C->hasOneUse())
7930     return SDValue();
7931   int64_t C0 = N0C->getSExtValue();
7932   int64_t C1 = N1C->getSExtValue();
7933   int64_t CA, CB;
7934   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7935     return SDValue();
7936   // Search for proper CA (non-zero) and CB that both are simm12.
7937   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7938       !isInt<12>(C0 * (C1 / C0))) {
7939     CA = C1 / C0;
7940     CB = C1 % C0;
7941   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7942              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7943     CA = C1 / C0 + 1;
7944     CB = C1 % C0 - C0;
7945   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7946              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7947     CA = C1 / C0 - 1;
7948     CB = C1 % C0 + C0;
7949   } else
7950     return SDValue();
7951   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7952   SDLoc DL(N);
7953   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7954                              DAG.getConstant(CA, DL, VT));
7955   SDValue New1 =
7956       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7957   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7958 }
7959 
7960 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7961                                  const RISCVSubtarget &Subtarget) {
7962   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7963     return V;
7964   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7965     return V;
7966   if (SDValue V = combineBinOpToReduce(N, DAG))
7967     return V;
7968   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7969   //      (select lhs, rhs, cc, x, (add x, y))
7970   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7971 }
7972 
7973 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7974   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7975   //      (select lhs, rhs, cc, x, (sub x, y))
7976   SDValue N0 = N->getOperand(0);
7977   SDValue N1 = N->getOperand(1);
7978   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7979 }
7980 
7981 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7982   if (SDValue V = combineBinOpToReduce(N, DAG))
7983     return V;
7984   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7985   //      (select lhs, rhs, cc, x, (and x, y))
7986   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7987 }
7988 
7989 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7990                                 const RISCVSubtarget &Subtarget) {
7991   if (Subtarget.hasStdExtZbp()) {
7992     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7993       return GREV;
7994     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7995       return GORC;
7996     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7997       return SHFL;
7998   }
7999 
8000   if (SDValue V = combineBinOpToReduce(N, DAG))
8001     return V;
8002   // fold (or (select cond, 0, y), x) ->
8003   //      (select cond, x, (or x, y))
8004   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8005 }
8006 
8007 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
8008   SDValue N0 = N->getOperand(0);
8009   SDValue N1 = N->getOperand(1);
8010 
8011   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
8012   // NOTE: Assumes ROL being legal means ROLW is legal.
8013   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8014   if (N0.getOpcode() == RISCVISD::SLLW &&
8015       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
8016       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
8017     SDLoc DL(N);
8018     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
8019                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
8020   }
8021 
8022   if (SDValue V = combineBinOpToReduce(N, DAG))
8023     return V;
8024   // fold (xor (select cond, 0, y), x) ->
8025   //      (select cond, x, (xor x, y))
8026   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8027 }
8028 
8029 static SDValue
8030 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
8031                                 const RISCVSubtarget &Subtarget) {
8032   SDValue Src = N->getOperand(0);
8033   EVT VT = N->getValueType(0);
8034 
8035   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8036   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8037       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8038     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8039                        Src.getOperand(0));
8040 
8041   // Fold (i64 (sext_inreg (abs X), i32)) ->
8042   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8043   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8044   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8045   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8046   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8047   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8048   // may get combined into an earlier operation so we need to use
8049   // ComputeNumSignBits.
8050   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8051   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8052   // we can't assume that X has 33 sign bits. We must check.
8053   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8054       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8055       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8056       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8057     SDLoc DL(N);
8058     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8059     SDValue Neg =
8060         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8061     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8062                       DAG.getValueType(MVT::i32));
8063     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8064   }
8065 
8066   return SDValue();
8067 }
8068 
8069 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8070 // vwadd(u).vv/vx or vwsub(u).vv/vx.
8071 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8072                                              bool Commute = false) {
8073   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8074           N->getOpcode() == RISCVISD::SUB_VL) &&
8075          "Unexpected opcode");
8076   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8077   SDValue Op0 = N->getOperand(0);
8078   SDValue Op1 = N->getOperand(1);
8079   if (Commute)
8080     std::swap(Op0, Op1);
8081 
8082   MVT VT = N->getSimpleValueType(0);
8083 
8084   // Determine the narrow size for a widening add/sub.
8085   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8086   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8087                                   VT.getVectorElementCount());
8088 
8089   SDValue Mask = N->getOperand(2);
8090   SDValue VL = N->getOperand(3);
8091 
8092   SDLoc DL(N);
8093 
8094   // If the RHS is a sext or zext, we can form a widening op.
8095   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8096        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8097       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8098     unsigned ExtOpc = Op1.getOpcode();
8099     Op1 = Op1.getOperand(0);
8100     // Re-introduce narrower extends if needed.
8101     if (Op1.getValueType() != NarrowVT)
8102       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8103 
8104     unsigned WOpc;
8105     if (ExtOpc == RISCVISD::VSEXT_VL)
8106       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8107     else
8108       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8109 
8110     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8111   }
8112 
8113   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8114   // sext/zext?
8115 
8116   return SDValue();
8117 }
8118 
8119 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8120 // vwsub(u).vv/vx.
8121 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8122   SDValue Op0 = N->getOperand(0);
8123   SDValue Op1 = N->getOperand(1);
8124   SDValue Mask = N->getOperand(2);
8125   SDValue VL = N->getOperand(3);
8126 
8127   MVT VT = N->getSimpleValueType(0);
8128   MVT NarrowVT = Op1.getSimpleValueType();
8129   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8130 
8131   unsigned VOpc;
8132   switch (N->getOpcode()) {
8133   default: llvm_unreachable("Unexpected opcode");
8134   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8135   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8136   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8137   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8138   }
8139 
8140   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8141                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8142 
8143   SDLoc DL(N);
8144 
8145   // If the LHS is a sext or zext, we can narrow this op to the same size as
8146   // the RHS.
8147   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8148        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8149       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8150     unsigned ExtOpc = Op0.getOpcode();
8151     Op0 = Op0.getOperand(0);
8152     // Re-introduce narrower extends if needed.
8153     if (Op0.getValueType() != NarrowVT)
8154       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8155     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8156   }
8157 
8158   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8159                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8160 
8161   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8162   // to commute and use a vwadd(u).vx instead.
8163   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8164       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8165     Op0 = Op0.getOperand(1);
8166 
8167     // See if have enough sign bits or zero bits in the scalar to use a
8168     // widening add/sub by splatting to smaller element size.
8169     unsigned EltBits = VT.getScalarSizeInBits();
8170     unsigned ScalarBits = Op0.getValueSizeInBits();
8171     // Make sure we're getting all element bits from the scalar register.
8172     // FIXME: Support implicit sign extension of vmv.v.x?
8173     if (ScalarBits < EltBits)
8174       return SDValue();
8175 
8176     if (IsSigned) {
8177       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8178         return SDValue();
8179     } else {
8180       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8181       if (!DAG.MaskedValueIsZero(Op0, Mask))
8182         return SDValue();
8183     }
8184 
8185     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8186                       DAG.getUNDEF(NarrowVT), Op0, VL);
8187     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8188   }
8189 
8190   return SDValue();
8191 }
8192 
8193 // Try to form VWMUL, VWMULU or VWMULSU.
8194 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
8195 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8196                                        bool Commute) {
8197   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8198   SDValue Op0 = N->getOperand(0);
8199   SDValue Op1 = N->getOperand(1);
8200   if (Commute)
8201     std::swap(Op0, Op1);
8202 
8203   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8204   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8205   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8206   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8207     return SDValue();
8208 
8209   SDValue Mask = N->getOperand(2);
8210   SDValue VL = N->getOperand(3);
8211 
8212   // Make sure the mask and VL match.
8213   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8214     return SDValue();
8215 
8216   MVT VT = N->getSimpleValueType(0);
8217 
8218   // Determine the narrow size for a widening multiply.
8219   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8220   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8221                                   VT.getVectorElementCount());
8222 
8223   SDLoc DL(N);
8224 
8225   // See if the other operand is the same opcode.
8226   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8227     if (!Op1.hasOneUse())
8228       return SDValue();
8229 
8230     // Make sure the mask and VL match.
8231     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8232       return SDValue();
8233 
8234     Op1 = Op1.getOperand(0);
8235   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8236     // The operand is a splat of a scalar.
8237 
8238     // The pasthru must be undef for tail agnostic
8239     if (!Op1.getOperand(0).isUndef())
8240       return SDValue();
8241     // The VL must be the same.
8242     if (Op1.getOperand(2) != VL)
8243       return SDValue();
8244 
8245     // Get the scalar value.
8246     Op1 = Op1.getOperand(1);
8247 
8248     // See if have enough sign bits or zero bits in the scalar to use a
8249     // widening multiply by splatting to smaller element size.
8250     unsigned EltBits = VT.getScalarSizeInBits();
8251     unsigned ScalarBits = Op1.getValueSizeInBits();
8252     // Make sure we're getting all element bits from the scalar register.
8253     // FIXME: Support implicit sign extension of vmv.v.x?
8254     if (ScalarBits < EltBits)
8255       return SDValue();
8256 
8257     // If the LHS is a sign extend, try to use vwmul.
8258     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8259       // Can use vwmul.
8260     } else {
8261       // Otherwise try to use vwmulu or vwmulsu.
8262       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8263       if (DAG.MaskedValueIsZero(Op1, Mask))
8264         IsVWMULSU = IsSignExt;
8265       else
8266         return SDValue();
8267     }
8268 
8269     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8270                       DAG.getUNDEF(NarrowVT), Op1, VL);
8271   } else
8272     return SDValue();
8273 
8274   Op0 = Op0.getOperand(0);
8275 
8276   // Re-introduce narrower extends if needed.
8277   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8278   if (Op0.getValueType() != NarrowVT)
8279     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8280   // vwmulsu requires second operand to be zero extended.
8281   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8282   if (Op1.getValueType() != NarrowVT)
8283     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8284 
8285   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8286   if (!IsVWMULSU)
8287     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8288   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8289 }
8290 
8291 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8292   switch (Op.getOpcode()) {
8293   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8294   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8295   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8296   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8297   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8298   }
8299 
8300   return RISCVFPRndMode::Invalid;
8301 }
8302 
8303 // Fold
8304 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8305 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8306 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8307 //   (fp_to_int (fceil X))      -> fcvt X, rup
8308 //   (fp_to_int (fround X))     -> fcvt X, rmm
8309 static SDValue performFP_TO_INTCombine(SDNode *N,
8310                                        TargetLowering::DAGCombinerInfo &DCI,
8311                                        const RISCVSubtarget &Subtarget) {
8312   SelectionDAG &DAG = DCI.DAG;
8313   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8314   MVT XLenVT = Subtarget.getXLenVT();
8315 
8316   // Only handle XLen or i32 types. Other types narrower than XLen will
8317   // eventually be legalized to XLenVT.
8318   EVT VT = N->getValueType(0);
8319   if (VT != MVT::i32 && VT != XLenVT)
8320     return SDValue();
8321 
8322   SDValue Src = N->getOperand(0);
8323 
8324   // Ensure the FP type is also legal.
8325   if (!TLI.isTypeLegal(Src.getValueType()))
8326     return SDValue();
8327 
8328   // Don't do this for f16 with Zfhmin and not Zfh.
8329   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8330     return SDValue();
8331 
8332   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8333   if (FRM == RISCVFPRndMode::Invalid)
8334     return SDValue();
8335 
8336   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8337 
8338   unsigned Opc;
8339   if (VT == XLenVT)
8340     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8341   else
8342     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8343 
8344   SDLoc DL(N);
8345   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8346                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8347   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8348 }
8349 
8350 // Fold
8351 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8352 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8353 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8354 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8355 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8356 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8357                                        TargetLowering::DAGCombinerInfo &DCI,
8358                                        const RISCVSubtarget &Subtarget) {
8359   SelectionDAG &DAG = DCI.DAG;
8360   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8361   MVT XLenVT = Subtarget.getXLenVT();
8362 
8363   // Only handle XLen types. Other types narrower than XLen will eventually be
8364   // legalized to XLenVT.
8365   EVT DstVT = N->getValueType(0);
8366   if (DstVT != XLenVT)
8367     return SDValue();
8368 
8369   SDValue Src = N->getOperand(0);
8370 
8371   // Ensure the FP type is also legal.
8372   if (!TLI.isTypeLegal(Src.getValueType()))
8373     return SDValue();
8374 
8375   // Don't do this for f16 with Zfhmin and not Zfh.
8376   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8377     return SDValue();
8378 
8379   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8380 
8381   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8382   if (FRM == RISCVFPRndMode::Invalid)
8383     return SDValue();
8384 
8385   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8386 
8387   unsigned Opc;
8388   if (SatVT == DstVT)
8389     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8390   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8391     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8392   else
8393     return SDValue();
8394   // FIXME: Support other SatVTs by clamping before or after the conversion.
8395 
8396   Src = Src.getOperand(0);
8397 
8398   SDLoc DL(N);
8399   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8400                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8401 
8402   // RISCV FP-to-int conversions saturate to the destination register size, but
8403   // don't produce 0 for nan.
8404   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8405   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8406 }
8407 
8408 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8409 // smaller than XLenVT.
8410 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8411                                         const RISCVSubtarget &Subtarget) {
8412   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8413 
8414   SDValue Src = N->getOperand(0);
8415   if (Src.getOpcode() != ISD::BSWAP)
8416     return SDValue();
8417 
8418   EVT VT = N->getValueType(0);
8419   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8420       !isPowerOf2_32(VT.getSizeInBits()))
8421     return SDValue();
8422 
8423   SDLoc DL(N);
8424   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8425                      DAG.getConstant(7, DL, VT));
8426 }
8427 
8428 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8429                                                DAGCombinerInfo &DCI) const {
8430   SelectionDAG &DAG = DCI.DAG;
8431 
8432   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8433   // bits are demanded. N will be added to the Worklist if it was not deleted.
8434   // Caller should return SDValue(N, 0) if this returns true.
8435   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8436     SDValue Op = N->getOperand(OpNo);
8437     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8438     if (!SimplifyDemandedBits(Op, Mask, DCI))
8439       return false;
8440 
8441     if (N->getOpcode() != ISD::DELETED_NODE)
8442       DCI.AddToWorklist(N);
8443     return true;
8444   };
8445 
8446   switch (N->getOpcode()) {
8447   default:
8448     break;
8449   case RISCVISD::SplitF64: {
8450     SDValue Op0 = N->getOperand(0);
8451     // If the input to SplitF64 is just BuildPairF64 then the operation is
8452     // redundant. Instead, use BuildPairF64's operands directly.
8453     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8454       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8455 
8456     if (Op0->isUndef()) {
8457       SDValue Lo = DAG.getUNDEF(MVT::i32);
8458       SDValue Hi = DAG.getUNDEF(MVT::i32);
8459       return DCI.CombineTo(N, Lo, Hi);
8460     }
8461 
8462     SDLoc DL(N);
8463 
8464     // It's cheaper to materialise two 32-bit integers than to load a double
8465     // from the constant pool and transfer it to integer registers through the
8466     // stack.
8467     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8468       APInt V = C->getValueAPF().bitcastToAPInt();
8469       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8470       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8471       return DCI.CombineTo(N, Lo, Hi);
8472     }
8473 
8474     // This is a target-specific version of a DAGCombine performed in
8475     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8476     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8477     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8478     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8479         !Op0.getNode()->hasOneUse())
8480       break;
8481     SDValue NewSplitF64 =
8482         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8483                     Op0.getOperand(0));
8484     SDValue Lo = NewSplitF64.getValue(0);
8485     SDValue Hi = NewSplitF64.getValue(1);
8486     APInt SignBit = APInt::getSignMask(32);
8487     if (Op0.getOpcode() == ISD::FNEG) {
8488       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8489                                   DAG.getConstant(SignBit, DL, MVT::i32));
8490       return DCI.CombineTo(N, Lo, NewHi);
8491     }
8492     assert(Op0.getOpcode() == ISD::FABS);
8493     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8494                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8495     return DCI.CombineTo(N, Lo, NewHi);
8496   }
8497   case RISCVISD::SLLW:
8498   case RISCVISD::SRAW:
8499   case RISCVISD::SRLW: {
8500     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8501     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8502         SimplifyDemandedLowBitsHelper(1, 5))
8503       return SDValue(N, 0);
8504 
8505     break;
8506   }
8507   case ISD::ROTR:
8508   case ISD::ROTL:
8509   case RISCVISD::RORW:
8510   case RISCVISD::ROLW: {
8511     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8512       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8513       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8514           SimplifyDemandedLowBitsHelper(1, 5))
8515         return SDValue(N, 0);
8516     }
8517 
8518     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8519   }
8520   case RISCVISD::CLZW:
8521   case RISCVISD::CTZW: {
8522     // Only the lower 32 bits of the first operand are read
8523     if (SimplifyDemandedLowBitsHelper(0, 32))
8524       return SDValue(N, 0);
8525     break;
8526   }
8527   case RISCVISD::GREV:
8528   case RISCVISD::GORC: {
8529     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8530     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8531     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8532     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8533       return SDValue(N, 0);
8534 
8535     return combineGREVI_GORCI(N, DAG);
8536   }
8537   case RISCVISD::GREVW:
8538   case RISCVISD::GORCW: {
8539     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8540     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8541         SimplifyDemandedLowBitsHelper(1, 5))
8542       return SDValue(N, 0);
8543 
8544     break;
8545   }
8546   case RISCVISD::SHFL:
8547   case RISCVISD::UNSHFL: {
8548     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8549     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8550     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8551     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8552       return SDValue(N, 0);
8553 
8554     break;
8555   }
8556   case RISCVISD::SHFLW:
8557   case RISCVISD::UNSHFLW: {
8558     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8559     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8560         SimplifyDemandedLowBitsHelper(1, 4))
8561       return SDValue(N, 0);
8562 
8563     break;
8564   }
8565   case RISCVISD::BCOMPRESSW:
8566   case RISCVISD::BDECOMPRESSW: {
8567     // Only the lower 32 bits of LHS and RHS are read.
8568     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8569         SimplifyDemandedLowBitsHelper(1, 32))
8570       return SDValue(N, 0);
8571 
8572     break;
8573   }
8574   case RISCVISD::FSR:
8575   case RISCVISD::FSL:
8576   case RISCVISD::FSRW:
8577   case RISCVISD::FSLW: {
8578     bool IsWInstruction =
8579         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8580     unsigned BitWidth =
8581         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8582     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8583     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8584     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8585       return SDValue(N, 0);
8586 
8587     break;
8588   }
8589   case RISCVISD::FMV_X_ANYEXTH:
8590   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8591     SDLoc DL(N);
8592     SDValue Op0 = N->getOperand(0);
8593     MVT VT = N->getSimpleValueType(0);
8594     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8595     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8596     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8597     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8598          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8599         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8600          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8601       assert(Op0.getOperand(0).getValueType() == VT &&
8602              "Unexpected value type!");
8603       return Op0.getOperand(0);
8604     }
8605 
8606     // This is a target-specific version of a DAGCombine performed in
8607     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8608     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8609     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8610     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8611         !Op0.getNode()->hasOneUse())
8612       break;
8613     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8614     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8615     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8616     if (Op0.getOpcode() == ISD::FNEG)
8617       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8618                          DAG.getConstant(SignBit, DL, VT));
8619 
8620     assert(Op0.getOpcode() == ISD::FABS);
8621     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8622                        DAG.getConstant(~SignBit, DL, VT));
8623   }
8624   case ISD::ADD:
8625     return performADDCombine(N, DAG, Subtarget);
8626   case ISD::SUB:
8627     return performSUBCombine(N, DAG);
8628   case ISD::AND:
8629     return performANDCombine(N, DAG);
8630   case ISD::OR:
8631     return performORCombine(N, DAG, Subtarget);
8632   case ISD::XOR:
8633     return performXORCombine(N, DAG);
8634   case ISD::FADD:
8635   case ISD::UMAX:
8636   case ISD::UMIN:
8637   case ISD::SMAX:
8638   case ISD::SMIN:
8639   case ISD::FMAXNUM:
8640   case ISD::FMINNUM:
8641     return combineBinOpToReduce(N, DAG);
8642   case ISD::SIGN_EXTEND_INREG:
8643     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8644   case ISD::ZERO_EXTEND:
8645     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8646     // type legalization. This is safe because fp_to_uint produces poison if
8647     // it overflows.
8648     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8649       SDValue Src = N->getOperand(0);
8650       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8651           isTypeLegal(Src.getOperand(0).getValueType()))
8652         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8653                            Src.getOperand(0));
8654       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8655           isTypeLegal(Src.getOperand(1).getValueType())) {
8656         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8657         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8658                                   Src.getOperand(0), Src.getOperand(1));
8659         DCI.CombineTo(N, Res);
8660         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8661         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8662         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8663       }
8664     }
8665     return SDValue();
8666   case RISCVISD::SELECT_CC: {
8667     // Transform
8668     SDValue LHS = N->getOperand(0);
8669     SDValue RHS = N->getOperand(1);
8670     SDValue TrueV = N->getOperand(3);
8671     SDValue FalseV = N->getOperand(4);
8672 
8673     // If the True and False values are the same, we don't need a select_cc.
8674     if (TrueV == FalseV)
8675       return TrueV;
8676 
8677     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8678     if (!ISD::isIntEqualitySetCC(CCVal))
8679       break;
8680 
8681     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8682     //      (select_cc X, Y, lt, trueV, falseV)
8683     // Sometimes the setcc is introduced after select_cc has been formed.
8684     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8685         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8686       // If we're looking for eq 0 instead of ne 0, we need to invert the
8687       // condition.
8688       bool Invert = CCVal == ISD::SETEQ;
8689       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8690       if (Invert)
8691         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8692 
8693       SDLoc DL(N);
8694       RHS = LHS.getOperand(1);
8695       LHS = LHS.getOperand(0);
8696       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8697 
8698       SDValue TargetCC = DAG.getCondCode(CCVal);
8699       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8700                          {LHS, RHS, TargetCC, TrueV, FalseV});
8701     }
8702 
8703     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8704     //      (select_cc X, Y, eq/ne, trueV, falseV)
8705     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8706       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8707                          {LHS.getOperand(0), LHS.getOperand(1),
8708                           N->getOperand(2), TrueV, FalseV});
8709     // (select_cc X, 1, setne, trueV, falseV) ->
8710     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8711     // This can occur when legalizing some floating point comparisons.
8712     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8713     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8714       SDLoc DL(N);
8715       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8716       SDValue TargetCC = DAG.getCondCode(CCVal);
8717       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8718       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8719                          {LHS, RHS, TargetCC, TrueV, FalseV});
8720     }
8721 
8722     break;
8723   }
8724   case RISCVISD::BR_CC: {
8725     SDValue LHS = N->getOperand(1);
8726     SDValue RHS = N->getOperand(2);
8727     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8728     if (!ISD::isIntEqualitySetCC(CCVal))
8729       break;
8730 
8731     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8732     //      (br_cc X, Y, lt, dest)
8733     // Sometimes the setcc is introduced after br_cc has been formed.
8734     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8735         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8736       // If we're looking for eq 0 instead of ne 0, we need to invert the
8737       // condition.
8738       bool Invert = CCVal == ISD::SETEQ;
8739       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8740       if (Invert)
8741         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8742 
8743       SDLoc DL(N);
8744       RHS = LHS.getOperand(1);
8745       LHS = LHS.getOperand(0);
8746       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8747 
8748       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8749                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8750                          N->getOperand(4));
8751     }
8752 
8753     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8754     //      (br_cc X, Y, eq/ne, trueV, falseV)
8755     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8756       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8757                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8758                          N->getOperand(3), N->getOperand(4));
8759 
8760     // (br_cc X, 1, setne, br_cc) ->
8761     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8762     // This can occur when legalizing some floating point comparisons.
8763     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8764     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8765       SDLoc DL(N);
8766       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8767       SDValue TargetCC = DAG.getCondCode(CCVal);
8768       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8769       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8770                          N->getOperand(0), LHS, RHS, TargetCC,
8771                          N->getOperand(4));
8772     }
8773     break;
8774   }
8775   case ISD::BITREVERSE:
8776     return performBITREVERSECombine(N, DAG, Subtarget);
8777   case ISD::FP_TO_SINT:
8778   case ISD::FP_TO_UINT:
8779     return performFP_TO_INTCombine(N, DCI, Subtarget);
8780   case ISD::FP_TO_SINT_SAT:
8781   case ISD::FP_TO_UINT_SAT:
8782     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8783   case ISD::FCOPYSIGN: {
8784     EVT VT = N->getValueType(0);
8785     if (!VT.isVector())
8786       break;
8787     // There is a form of VFSGNJ which injects the negated sign of its second
8788     // operand. Try and bubble any FNEG up after the extend/round to produce
8789     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8790     // TRUNC=1.
8791     SDValue In2 = N->getOperand(1);
8792     // Avoid cases where the extend/round has multiple uses, as duplicating
8793     // those is typically more expensive than removing a fneg.
8794     if (!In2.hasOneUse())
8795       break;
8796     if (In2.getOpcode() != ISD::FP_EXTEND &&
8797         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8798       break;
8799     In2 = In2.getOperand(0);
8800     if (In2.getOpcode() != ISD::FNEG)
8801       break;
8802     SDLoc DL(N);
8803     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8804     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8805                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8806   }
8807   case ISD::MGATHER:
8808   case ISD::MSCATTER:
8809   case ISD::VP_GATHER:
8810   case ISD::VP_SCATTER: {
8811     if (!DCI.isBeforeLegalize())
8812       break;
8813     SDValue Index, ScaleOp;
8814     bool IsIndexScaled = false;
8815     bool IsIndexSigned = false;
8816     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8817       Index = VPGSN->getIndex();
8818       ScaleOp = VPGSN->getScale();
8819       IsIndexScaled = VPGSN->isIndexScaled();
8820       IsIndexSigned = VPGSN->isIndexSigned();
8821     } else {
8822       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8823       Index = MGSN->getIndex();
8824       ScaleOp = MGSN->getScale();
8825       IsIndexScaled = MGSN->isIndexScaled();
8826       IsIndexSigned = MGSN->isIndexSigned();
8827     }
8828     EVT IndexVT = Index.getValueType();
8829     MVT XLenVT = Subtarget.getXLenVT();
8830     // RISCV indexed loads only support the "unsigned unscaled" addressing
8831     // mode, so anything else must be manually legalized.
8832     bool NeedsIdxLegalization =
8833         IsIndexScaled ||
8834         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8835     if (!NeedsIdxLegalization)
8836       break;
8837 
8838     SDLoc DL(N);
8839 
8840     // Any index legalization should first promote to XLenVT, so we don't lose
8841     // bits when scaling. This may create an illegal index type so we let
8842     // LLVM's legalization take care of the splitting.
8843     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8844     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8845       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8846       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8847                           DL, IndexVT, Index);
8848     }
8849 
8850     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8851     if (IsIndexScaled && Scale != 1) {
8852       // Manually scale the indices by the element size.
8853       // TODO: Sanitize the scale operand here?
8854       // TODO: For VP nodes, should we use VP_SHL here?
8855       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8856       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8857       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8858     }
8859 
8860     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8861     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8862       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8863                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8864                               VPGN->getScale(), VPGN->getMask(),
8865                               VPGN->getVectorLength()},
8866                              VPGN->getMemOperand(), NewIndexTy);
8867     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8868       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8869                               {VPSN->getChain(), VPSN->getValue(),
8870                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8871                                VPSN->getMask(), VPSN->getVectorLength()},
8872                               VPSN->getMemOperand(), NewIndexTy);
8873     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8874       return DAG.getMaskedGather(
8875           N->getVTList(), MGN->getMemoryVT(), DL,
8876           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8877            MGN->getBasePtr(), Index, MGN->getScale()},
8878           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8879     const auto *MSN = cast<MaskedScatterSDNode>(N);
8880     return DAG.getMaskedScatter(
8881         N->getVTList(), MSN->getMemoryVT(), DL,
8882         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8883          Index, MSN->getScale()},
8884         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8885   }
8886   case RISCVISD::SRA_VL:
8887   case RISCVISD::SRL_VL:
8888   case RISCVISD::SHL_VL: {
8889     SDValue ShAmt = N->getOperand(1);
8890     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8891       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8892       SDLoc DL(N);
8893       SDValue VL = N->getOperand(3);
8894       EVT VT = N->getValueType(0);
8895       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8896                           ShAmt.getOperand(1), VL);
8897       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8898                          N->getOperand(2), N->getOperand(3));
8899     }
8900     break;
8901   }
8902   case ISD::SRA:
8903   case ISD::SRL:
8904   case ISD::SHL: {
8905     SDValue ShAmt = N->getOperand(1);
8906     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8907       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8908       SDLoc DL(N);
8909       EVT VT = N->getValueType(0);
8910       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8911                           ShAmt.getOperand(1),
8912                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8913       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8914     }
8915     break;
8916   }
8917   case RISCVISD::ADD_VL:
8918     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8919       return V;
8920     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8921   case RISCVISD::SUB_VL:
8922     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8923   case RISCVISD::VWADD_W_VL:
8924   case RISCVISD::VWADDU_W_VL:
8925   case RISCVISD::VWSUB_W_VL:
8926   case RISCVISD::VWSUBU_W_VL:
8927     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8928   case RISCVISD::MUL_VL:
8929     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8930       return V;
8931     // Mul is commutative.
8932     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8933   case ISD::STORE: {
8934     auto *Store = cast<StoreSDNode>(N);
8935     SDValue Val = Store->getValue();
8936     // Combine store of vmv.x.s to vse with VL of 1.
8937     // FIXME: Support FP.
8938     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8939       SDValue Src = Val.getOperand(0);
8940       EVT VecVT = Src.getValueType();
8941       EVT MemVT = Store->getMemoryVT();
8942       // The memory VT and the element type must match.
8943       if (VecVT.getVectorElementType() == MemVT) {
8944         SDLoc DL(N);
8945         MVT MaskVT = getMaskTypeFor(VecVT);
8946         return DAG.getStoreVP(
8947             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8948             DAG.getConstant(1, DL, MaskVT),
8949             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8950             Store->getMemOperand(), Store->getAddressingMode(),
8951             Store->isTruncatingStore(), /*IsCompress*/ false);
8952       }
8953     }
8954 
8955     break;
8956   }
8957   case ISD::SPLAT_VECTOR: {
8958     EVT VT = N->getValueType(0);
8959     // Only perform this combine on legal MVT types.
8960     if (!isTypeLegal(VT))
8961       break;
8962     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8963                                          DAG, Subtarget))
8964       return Gather;
8965     break;
8966   }
8967   case RISCVISD::VMV_V_X_VL: {
8968     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8969     // scalar input.
8970     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8971     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8972     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8973       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8974         return SDValue(N, 0);
8975 
8976     break;
8977   }
8978   case ISD::INTRINSIC_WO_CHAIN: {
8979     unsigned IntNo = N->getConstantOperandVal(0);
8980     switch (IntNo) {
8981       // By default we do not combine any intrinsic.
8982     default:
8983       return SDValue();
8984     case Intrinsic::riscv_vcpop:
8985     case Intrinsic::riscv_vcpop_mask:
8986     case Intrinsic::riscv_vfirst:
8987     case Intrinsic::riscv_vfirst_mask: {
8988       SDValue VL = N->getOperand(2);
8989       if (IntNo == Intrinsic::riscv_vcpop_mask ||
8990           IntNo == Intrinsic::riscv_vfirst_mask)
8991         VL = N->getOperand(3);
8992       if (!isNullConstant(VL))
8993         return SDValue();
8994       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
8995       SDLoc DL(N);
8996       EVT VT = N->getValueType(0);
8997       if (IntNo == Intrinsic::riscv_vfirst ||
8998           IntNo == Intrinsic::riscv_vfirst_mask)
8999         return DAG.getConstant(-1, DL, VT);
9000       return DAG.getConstant(0, DL, VT);
9001     }
9002     }
9003   }
9004   }
9005 
9006   return SDValue();
9007 }
9008 
9009 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
9010     const SDNode *N, CombineLevel Level) const {
9011   // The following folds are only desirable if `(OP _, c1 << c2)` can be
9012   // materialised in fewer instructions than `(OP _, c1)`:
9013   //
9014   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
9015   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
9016   SDValue N0 = N->getOperand(0);
9017   EVT Ty = N0.getValueType();
9018   if (Ty.isScalarInteger() &&
9019       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
9020     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
9021     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
9022     if (C1 && C2) {
9023       const APInt &C1Int = C1->getAPIntValue();
9024       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
9025 
9026       // We can materialise `c1 << c2` into an add immediate, so it's "free",
9027       // and the combine should happen, to potentially allow further combines
9028       // later.
9029       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
9030           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
9031         return true;
9032 
9033       // We can materialise `c1` in an add immediate, so it's "free", and the
9034       // combine should be prevented.
9035       if (C1Int.getMinSignedBits() <= 64 &&
9036           isLegalAddImmediate(C1Int.getSExtValue()))
9037         return false;
9038 
9039       // Neither constant will fit into an immediate, so find materialisation
9040       // costs.
9041       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9042                                               Subtarget.getFeatureBits(),
9043                                               /*CompressionCost*/true);
9044       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9045           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9046           /*CompressionCost*/true);
9047 
9048       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9049       // combine should be prevented.
9050       if (C1Cost < ShiftedC1Cost)
9051         return false;
9052     }
9053   }
9054   return true;
9055 }
9056 
9057 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9058     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9059     TargetLoweringOpt &TLO) const {
9060   // Delay this optimization as late as possible.
9061   if (!TLO.LegalOps)
9062     return false;
9063 
9064   EVT VT = Op.getValueType();
9065   if (VT.isVector())
9066     return false;
9067 
9068   // Only handle AND for now.
9069   if (Op.getOpcode() != ISD::AND)
9070     return false;
9071 
9072   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9073   if (!C)
9074     return false;
9075 
9076   const APInt &Mask = C->getAPIntValue();
9077 
9078   // Clear all non-demanded bits initially.
9079   APInt ShrunkMask = Mask & DemandedBits;
9080 
9081   // Try to make a smaller immediate by setting undemanded bits.
9082 
9083   APInt ExpandedMask = Mask | ~DemandedBits;
9084 
9085   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9086     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9087   };
9088   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
9089     if (NewMask == Mask)
9090       return true;
9091     SDLoc DL(Op);
9092     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
9093     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
9094     return TLO.CombineTo(Op, NewOp);
9095   };
9096 
9097   // If the shrunk mask fits in sign extended 12 bits, let the target
9098   // independent code apply it.
9099   if (ShrunkMask.isSignedIntN(12))
9100     return false;
9101 
9102   // Preserve (and X, 0xffff) when zext.h is supported.
9103   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9104     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9105     if (IsLegalMask(NewMask))
9106       return UseMask(NewMask);
9107   }
9108 
9109   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9110   if (VT == MVT::i64) {
9111     APInt NewMask = APInt(64, 0xffffffff);
9112     if (IsLegalMask(NewMask))
9113       return UseMask(NewMask);
9114   }
9115 
9116   // For the remaining optimizations, we need to be able to make a negative
9117   // number through a combination of mask and undemanded bits.
9118   if (!ExpandedMask.isNegative())
9119     return false;
9120 
9121   // What is the fewest number of bits we need to represent the negative number.
9122   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9123 
9124   // Try to make a 12 bit negative immediate. If that fails try to make a 32
9125   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
9126   APInt NewMask = ShrunkMask;
9127   if (MinSignedBits <= 12)
9128     NewMask.setBitsFrom(11);
9129   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
9130     NewMask.setBitsFrom(31);
9131   else
9132     return false;
9133 
9134   // Check that our new mask is a subset of the demanded mask.
9135   assert(IsLegalMask(NewMask));
9136   return UseMask(NewMask);
9137 }
9138 
9139 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9140   static const uint64_t GREVMasks[] = {
9141       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9142       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9143 
9144   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9145     unsigned Shift = 1 << Stage;
9146     if (ShAmt & Shift) {
9147       uint64_t Mask = GREVMasks[Stage];
9148       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9149       if (IsGORC)
9150         Res |= x;
9151       x = Res;
9152     }
9153   }
9154 
9155   return x;
9156 }
9157 
9158 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9159                                                         KnownBits &Known,
9160                                                         const APInt &DemandedElts,
9161                                                         const SelectionDAG &DAG,
9162                                                         unsigned Depth) const {
9163   unsigned BitWidth = Known.getBitWidth();
9164   unsigned Opc = Op.getOpcode();
9165   assert((Opc >= ISD::BUILTIN_OP_END ||
9166           Opc == ISD::INTRINSIC_WO_CHAIN ||
9167           Opc == ISD::INTRINSIC_W_CHAIN ||
9168           Opc == ISD::INTRINSIC_VOID) &&
9169          "Should use MaskedValueIsZero if you don't know whether Op"
9170          " is a target node!");
9171 
9172   Known.resetAll();
9173   switch (Opc) {
9174   default: break;
9175   case RISCVISD::SELECT_CC: {
9176     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9177     // If we don't know any bits, early out.
9178     if (Known.isUnknown())
9179       break;
9180     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9181 
9182     // Only known if known in both the LHS and RHS.
9183     Known = KnownBits::commonBits(Known, Known2);
9184     break;
9185   }
9186   case RISCVISD::REMUW: {
9187     KnownBits Known2;
9188     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9189     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9190     // We only care about the lower 32 bits.
9191     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9192     // Restore the original width by sign extending.
9193     Known = Known.sext(BitWidth);
9194     break;
9195   }
9196   case RISCVISD::DIVUW: {
9197     KnownBits Known2;
9198     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9199     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9200     // We only care about the lower 32 bits.
9201     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9202     // Restore the original width by sign extending.
9203     Known = Known.sext(BitWidth);
9204     break;
9205   }
9206   case RISCVISD::CTZW: {
9207     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9208     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9209     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9210     Known.Zero.setBitsFrom(LowBits);
9211     break;
9212   }
9213   case RISCVISD::CLZW: {
9214     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9215     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9216     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9217     Known.Zero.setBitsFrom(LowBits);
9218     break;
9219   }
9220   case RISCVISD::GREV:
9221   case RISCVISD::GORC: {
9222     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9223       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9224       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9225       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
9226       // To compute zeros, we need to invert the value and invert it back after.
9227       Known.Zero =
9228           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9229       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9230     }
9231     break;
9232   }
9233   case RISCVISD::READ_VLENB: {
9234     // If we know the minimum VLen from Zvl extensions, we can use that to
9235     // determine the trailing zeros of VLENB.
9236     // FIXME: Limit to 128 bit vectors until we have more testing.
9237     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
9238     if (MinVLenB > 0)
9239       Known.Zero.setLowBits(Log2_32(MinVLenB));
9240     // We assume VLENB is no more than 65536 / 8 bytes.
9241     Known.Zero.setBitsFrom(14);
9242     break;
9243   }
9244   case ISD::INTRINSIC_W_CHAIN:
9245   case ISD::INTRINSIC_WO_CHAIN: {
9246     unsigned IntNo =
9247         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9248     switch (IntNo) {
9249     default:
9250       // We can't do anything for most intrinsics.
9251       break;
9252     case Intrinsic::riscv_vsetvli:
9253     case Intrinsic::riscv_vsetvlimax:
9254     case Intrinsic::riscv_vsetvli_opt:
9255     case Intrinsic::riscv_vsetvlimax_opt:
9256       // Assume that VL output is positive and would fit in an int32_t.
9257       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9258       if (BitWidth >= 32)
9259         Known.Zero.setBitsFrom(31);
9260       break;
9261     }
9262     break;
9263   }
9264   }
9265 }
9266 
9267 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9268     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9269     unsigned Depth) const {
9270   switch (Op.getOpcode()) {
9271   default:
9272     break;
9273   case RISCVISD::SELECT_CC: {
9274     unsigned Tmp =
9275         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9276     if (Tmp == 1) return 1;  // Early out.
9277     unsigned Tmp2 =
9278         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9279     return std::min(Tmp, Tmp2);
9280   }
9281   case RISCVISD::SLLW:
9282   case RISCVISD::SRAW:
9283   case RISCVISD::SRLW:
9284   case RISCVISD::DIVW:
9285   case RISCVISD::DIVUW:
9286   case RISCVISD::REMUW:
9287   case RISCVISD::ROLW:
9288   case RISCVISD::RORW:
9289   case RISCVISD::GREVW:
9290   case RISCVISD::GORCW:
9291   case RISCVISD::FSLW:
9292   case RISCVISD::FSRW:
9293   case RISCVISD::SHFLW:
9294   case RISCVISD::UNSHFLW:
9295   case RISCVISD::BCOMPRESSW:
9296   case RISCVISD::BDECOMPRESSW:
9297   case RISCVISD::BFPW:
9298   case RISCVISD::FCVT_W_RV64:
9299   case RISCVISD::FCVT_WU_RV64:
9300   case RISCVISD::STRICT_FCVT_W_RV64:
9301   case RISCVISD::STRICT_FCVT_WU_RV64:
9302     // TODO: As the result is sign-extended, this is conservatively correct. A
9303     // more precise answer could be calculated for SRAW depending on known
9304     // bits in the shift amount.
9305     return 33;
9306   case RISCVISD::SHFL:
9307   case RISCVISD::UNSHFL: {
9308     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
9309     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9310     // will stay within the upper 32 bits. If there were more than 32 sign bits
9311     // before there will be at least 33 sign bits after.
9312     if (Op.getValueType() == MVT::i64 &&
9313         isa<ConstantSDNode>(Op.getOperand(1)) &&
9314         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9315       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9316       if (Tmp > 32)
9317         return 33;
9318     }
9319     break;
9320   }
9321   case RISCVISD::VMV_X_S: {
9322     // The number of sign bits of the scalar result is computed by obtaining the
9323     // element type of the input vector operand, subtracting its width from the
9324     // XLEN, and then adding one (sign bit within the element type). If the
9325     // element type is wider than XLen, the least-significant XLEN bits are
9326     // taken.
9327     unsigned XLen = Subtarget.getXLen();
9328     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9329     if (EltBits <= XLen)
9330       return XLen - EltBits + 1;
9331     break;
9332   }
9333   }
9334 
9335   return 1;
9336 }
9337 
9338 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9339                                                   MachineBasicBlock *BB) {
9340   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9341 
9342   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9343   // Should the count have wrapped while it was being read, we need to try
9344   // again.
9345   // ...
9346   // read:
9347   // rdcycleh x3 # load high word of cycle
9348   // rdcycle  x2 # load low word of cycle
9349   // rdcycleh x4 # load high word of cycle
9350   // bne x3, x4, read # check if high word reads match, otherwise try again
9351   // ...
9352 
9353   MachineFunction &MF = *BB->getParent();
9354   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9355   MachineFunction::iterator It = ++BB->getIterator();
9356 
9357   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9358   MF.insert(It, LoopMBB);
9359 
9360   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9361   MF.insert(It, DoneMBB);
9362 
9363   // Transfer the remainder of BB and its successor edges to DoneMBB.
9364   DoneMBB->splice(DoneMBB->begin(), BB,
9365                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9366   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9367 
9368   BB->addSuccessor(LoopMBB);
9369 
9370   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9371   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9372   Register LoReg = MI.getOperand(0).getReg();
9373   Register HiReg = MI.getOperand(1).getReg();
9374   DebugLoc DL = MI.getDebugLoc();
9375 
9376   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9377   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9378       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9379       .addReg(RISCV::X0);
9380   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9381       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9382       .addReg(RISCV::X0);
9383   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9384       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9385       .addReg(RISCV::X0);
9386 
9387   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9388       .addReg(HiReg)
9389       .addReg(ReadAgainReg)
9390       .addMBB(LoopMBB);
9391 
9392   LoopMBB->addSuccessor(LoopMBB);
9393   LoopMBB->addSuccessor(DoneMBB);
9394 
9395   MI.eraseFromParent();
9396 
9397   return DoneMBB;
9398 }
9399 
9400 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9401                                              MachineBasicBlock *BB) {
9402   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9403 
9404   MachineFunction &MF = *BB->getParent();
9405   DebugLoc DL = MI.getDebugLoc();
9406   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9407   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9408   Register LoReg = MI.getOperand(0).getReg();
9409   Register HiReg = MI.getOperand(1).getReg();
9410   Register SrcReg = MI.getOperand(2).getReg();
9411   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9412   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9413 
9414   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9415                           RI);
9416   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9417   MachineMemOperand *MMOLo =
9418       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9419   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9420       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9421   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9422       .addFrameIndex(FI)
9423       .addImm(0)
9424       .addMemOperand(MMOLo);
9425   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9426       .addFrameIndex(FI)
9427       .addImm(4)
9428       .addMemOperand(MMOHi);
9429   MI.eraseFromParent(); // The pseudo instruction is gone now.
9430   return BB;
9431 }
9432 
9433 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9434                                                  MachineBasicBlock *BB) {
9435   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9436          "Unexpected instruction");
9437 
9438   MachineFunction &MF = *BB->getParent();
9439   DebugLoc DL = MI.getDebugLoc();
9440   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9441   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9442   Register DstReg = MI.getOperand(0).getReg();
9443   Register LoReg = MI.getOperand(1).getReg();
9444   Register HiReg = MI.getOperand(2).getReg();
9445   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9446   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9447 
9448   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9449   MachineMemOperand *MMOLo =
9450       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9451   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9452       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9453   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9454       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9455       .addFrameIndex(FI)
9456       .addImm(0)
9457       .addMemOperand(MMOLo);
9458   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9459       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9460       .addFrameIndex(FI)
9461       .addImm(4)
9462       .addMemOperand(MMOHi);
9463   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9464   MI.eraseFromParent(); // The pseudo instruction is gone now.
9465   return BB;
9466 }
9467 
9468 static bool isSelectPseudo(MachineInstr &MI) {
9469   switch (MI.getOpcode()) {
9470   default:
9471     return false;
9472   case RISCV::Select_GPR_Using_CC_GPR:
9473   case RISCV::Select_FPR16_Using_CC_GPR:
9474   case RISCV::Select_FPR32_Using_CC_GPR:
9475   case RISCV::Select_FPR64_Using_CC_GPR:
9476     return true;
9477   }
9478 }
9479 
9480 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9481                                         unsigned RelOpcode, unsigned EqOpcode,
9482                                         const RISCVSubtarget &Subtarget) {
9483   DebugLoc DL = MI.getDebugLoc();
9484   Register DstReg = MI.getOperand(0).getReg();
9485   Register Src1Reg = MI.getOperand(1).getReg();
9486   Register Src2Reg = MI.getOperand(2).getReg();
9487   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9488   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9489   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9490 
9491   // Save the current FFLAGS.
9492   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9493 
9494   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9495                  .addReg(Src1Reg)
9496                  .addReg(Src2Reg);
9497   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9498     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9499 
9500   // Restore the FFLAGS.
9501   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9502       .addReg(SavedFFlags, RegState::Kill);
9503 
9504   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9505   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9506                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9507                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9508   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9509     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9510 
9511   // Erase the pseudoinstruction.
9512   MI.eraseFromParent();
9513   return BB;
9514 }
9515 
9516 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9517                                            MachineBasicBlock *BB,
9518                                            const RISCVSubtarget &Subtarget) {
9519   // To "insert" Select_* instructions, we actually have to insert the triangle
9520   // control-flow pattern.  The incoming instructions know the destination vreg
9521   // to set, the condition code register to branch on, the true/false values to
9522   // select between, and the condcode to use to select the appropriate branch.
9523   //
9524   // We produce the following control flow:
9525   //     HeadMBB
9526   //     |  \
9527   //     |  IfFalseMBB
9528   //     | /
9529   //    TailMBB
9530   //
9531   // When we find a sequence of selects we attempt to optimize their emission
9532   // by sharing the control flow. Currently we only handle cases where we have
9533   // multiple selects with the exact same condition (same LHS, RHS and CC).
9534   // The selects may be interleaved with other instructions if the other
9535   // instructions meet some requirements we deem safe:
9536   // - They are debug instructions. Otherwise,
9537   // - They do not have side-effects, do not access memory and their inputs do
9538   //   not depend on the results of the select pseudo-instructions.
9539   // The TrueV/FalseV operands of the selects cannot depend on the result of
9540   // previous selects in the sequence.
9541   // These conditions could be further relaxed. See the X86 target for a
9542   // related approach and more information.
9543   Register LHS = MI.getOperand(1).getReg();
9544   Register RHS = MI.getOperand(2).getReg();
9545   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9546 
9547   SmallVector<MachineInstr *, 4> SelectDebugValues;
9548   SmallSet<Register, 4> SelectDests;
9549   SelectDests.insert(MI.getOperand(0).getReg());
9550 
9551   MachineInstr *LastSelectPseudo = &MI;
9552 
9553   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9554        SequenceMBBI != E; ++SequenceMBBI) {
9555     if (SequenceMBBI->isDebugInstr())
9556       continue;
9557     else if (isSelectPseudo(*SequenceMBBI)) {
9558       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9559           SequenceMBBI->getOperand(2).getReg() != RHS ||
9560           SequenceMBBI->getOperand(3).getImm() != CC ||
9561           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9562           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9563         break;
9564       LastSelectPseudo = &*SequenceMBBI;
9565       SequenceMBBI->collectDebugValues(SelectDebugValues);
9566       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9567     } else {
9568       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9569           SequenceMBBI->mayLoadOrStore())
9570         break;
9571       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9572             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9573           }))
9574         break;
9575     }
9576   }
9577 
9578   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9579   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9580   DebugLoc DL = MI.getDebugLoc();
9581   MachineFunction::iterator I = ++BB->getIterator();
9582 
9583   MachineBasicBlock *HeadMBB = BB;
9584   MachineFunction *F = BB->getParent();
9585   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9586   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9587 
9588   F->insert(I, IfFalseMBB);
9589   F->insert(I, TailMBB);
9590 
9591   // Transfer debug instructions associated with the selects to TailMBB.
9592   for (MachineInstr *DebugInstr : SelectDebugValues) {
9593     TailMBB->push_back(DebugInstr->removeFromParent());
9594   }
9595 
9596   // Move all instructions after the sequence to TailMBB.
9597   TailMBB->splice(TailMBB->end(), HeadMBB,
9598                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9599   // Update machine-CFG edges by transferring all successors of the current
9600   // block to the new block which will contain the Phi nodes for the selects.
9601   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9602   // Set the successors for HeadMBB.
9603   HeadMBB->addSuccessor(IfFalseMBB);
9604   HeadMBB->addSuccessor(TailMBB);
9605 
9606   // Insert appropriate branch.
9607   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9608     .addReg(LHS)
9609     .addReg(RHS)
9610     .addMBB(TailMBB);
9611 
9612   // IfFalseMBB just falls through to TailMBB.
9613   IfFalseMBB->addSuccessor(TailMBB);
9614 
9615   // Create PHIs for all of the select pseudo-instructions.
9616   auto SelectMBBI = MI.getIterator();
9617   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9618   auto InsertionPoint = TailMBB->begin();
9619   while (SelectMBBI != SelectEnd) {
9620     auto Next = std::next(SelectMBBI);
9621     if (isSelectPseudo(*SelectMBBI)) {
9622       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9623       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9624               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9625           .addReg(SelectMBBI->getOperand(4).getReg())
9626           .addMBB(HeadMBB)
9627           .addReg(SelectMBBI->getOperand(5).getReg())
9628           .addMBB(IfFalseMBB);
9629       SelectMBBI->eraseFromParent();
9630     }
9631     SelectMBBI = Next;
9632   }
9633 
9634   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9635   return TailMBB;
9636 }
9637 
9638 MachineBasicBlock *
9639 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9640                                                  MachineBasicBlock *BB) const {
9641   switch (MI.getOpcode()) {
9642   default:
9643     llvm_unreachable("Unexpected instr type to insert");
9644   case RISCV::ReadCycleWide:
9645     assert(!Subtarget.is64Bit() &&
9646            "ReadCycleWrite is only to be used on riscv32");
9647     return emitReadCycleWidePseudo(MI, BB);
9648   case RISCV::Select_GPR_Using_CC_GPR:
9649   case RISCV::Select_FPR16_Using_CC_GPR:
9650   case RISCV::Select_FPR32_Using_CC_GPR:
9651   case RISCV::Select_FPR64_Using_CC_GPR:
9652     return emitSelectPseudo(MI, BB, Subtarget);
9653   case RISCV::BuildPairF64Pseudo:
9654     return emitBuildPairF64Pseudo(MI, BB);
9655   case RISCV::SplitF64Pseudo:
9656     return emitSplitF64Pseudo(MI, BB);
9657   case RISCV::PseudoQuietFLE_H:
9658     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9659   case RISCV::PseudoQuietFLT_H:
9660     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9661   case RISCV::PseudoQuietFLE_S:
9662     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9663   case RISCV::PseudoQuietFLT_S:
9664     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9665   case RISCV::PseudoQuietFLE_D:
9666     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9667   case RISCV::PseudoQuietFLT_D:
9668     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9669   }
9670 }
9671 
9672 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9673                                                         SDNode *Node) const {
9674   // Add FRM dependency to any instructions with dynamic rounding mode.
9675   unsigned Opc = MI.getOpcode();
9676   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9677   if (Idx < 0)
9678     return;
9679   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9680     return;
9681   // If the instruction already reads FRM, don't add another read.
9682   if (MI.readsRegister(RISCV::FRM))
9683     return;
9684   MI.addOperand(
9685       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9686 }
9687 
9688 // Calling Convention Implementation.
9689 // The expectations for frontend ABI lowering vary from target to target.
9690 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9691 // details, but this is a longer term goal. For now, we simply try to keep the
9692 // role of the frontend as simple and well-defined as possible. The rules can
9693 // be summarised as:
9694 // * Never split up large scalar arguments. We handle them here.
9695 // * If a hardfloat calling convention is being used, and the struct may be
9696 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9697 // available, then pass as two separate arguments. If either the GPRs or FPRs
9698 // are exhausted, then pass according to the rule below.
9699 // * If a struct could never be passed in registers or directly in a stack
9700 // slot (as it is larger than 2*XLEN and the floating point rules don't
9701 // apply), then pass it using a pointer with the byval attribute.
9702 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9703 // word-sized array or a 2*XLEN scalar (depending on alignment).
9704 // * The frontend can determine whether a struct is returned by reference or
9705 // not based on its size and fields. If it will be returned by reference, the
9706 // frontend must modify the prototype so a pointer with the sret annotation is
9707 // passed as the first argument. This is not necessary for large scalar
9708 // returns.
9709 // * Struct return values and varargs should be coerced to structs containing
9710 // register-size fields in the same situations they would be for fixed
9711 // arguments.
9712 
9713 static const MCPhysReg ArgGPRs[] = {
9714   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9715   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9716 };
9717 static const MCPhysReg ArgFPR16s[] = {
9718   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9719   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9720 };
9721 static const MCPhysReg ArgFPR32s[] = {
9722   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9723   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9724 };
9725 static const MCPhysReg ArgFPR64s[] = {
9726   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9727   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9728 };
9729 // This is an interim calling convention and it may be changed in the future.
9730 static const MCPhysReg ArgVRs[] = {
9731     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9732     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9733     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9734 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9735                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9736                                      RISCV::V20M2, RISCV::V22M2};
9737 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9738                                      RISCV::V20M4};
9739 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9740 
9741 // Pass a 2*XLEN argument that has been split into two XLEN values through
9742 // registers or the stack as necessary.
9743 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9744                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9745                                 MVT ValVT2, MVT LocVT2,
9746                                 ISD::ArgFlagsTy ArgFlags2) {
9747   unsigned XLenInBytes = XLen / 8;
9748   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9749     // At least one half can be passed via register.
9750     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9751                                      VA1.getLocVT(), CCValAssign::Full));
9752   } else {
9753     // Both halves must be passed on the stack, with proper alignment.
9754     Align StackAlign =
9755         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9756     State.addLoc(
9757         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9758                             State.AllocateStack(XLenInBytes, StackAlign),
9759                             VA1.getLocVT(), CCValAssign::Full));
9760     State.addLoc(CCValAssign::getMem(
9761         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9762         LocVT2, CCValAssign::Full));
9763     return false;
9764   }
9765 
9766   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9767     // The second half can also be passed via register.
9768     State.addLoc(
9769         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9770   } else {
9771     // The second half is passed via the stack, without additional alignment.
9772     State.addLoc(CCValAssign::getMem(
9773         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9774         LocVT2, CCValAssign::Full));
9775   }
9776 
9777   return false;
9778 }
9779 
9780 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9781                                Optional<unsigned> FirstMaskArgument,
9782                                CCState &State, const RISCVTargetLowering &TLI) {
9783   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9784   if (RC == &RISCV::VRRegClass) {
9785     // Assign the first mask argument to V0.
9786     // This is an interim calling convention and it may be changed in the
9787     // future.
9788     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9789       return State.AllocateReg(RISCV::V0);
9790     return State.AllocateReg(ArgVRs);
9791   }
9792   if (RC == &RISCV::VRM2RegClass)
9793     return State.AllocateReg(ArgVRM2s);
9794   if (RC == &RISCV::VRM4RegClass)
9795     return State.AllocateReg(ArgVRM4s);
9796   if (RC == &RISCV::VRM8RegClass)
9797     return State.AllocateReg(ArgVRM8s);
9798   llvm_unreachable("Unhandled register class for ValueType");
9799 }
9800 
9801 // Implements the RISC-V calling convention. Returns true upon failure.
9802 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9803                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9804                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9805                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9806                      Optional<unsigned> FirstMaskArgument) {
9807   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9808   assert(XLen == 32 || XLen == 64);
9809   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9810 
9811   // Any return value split in to more than two values can't be returned
9812   // directly. Vectors are returned via the available vector registers.
9813   if (!LocVT.isVector() && IsRet && ValNo > 1)
9814     return true;
9815 
9816   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9817   // variadic argument, or if no F16/F32 argument registers are available.
9818   bool UseGPRForF16_F32 = true;
9819   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9820   // variadic argument, or if no F64 argument registers are available.
9821   bool UseGPRForF64 = true;
9822 
9823   switch (ABI) {
9824   default:
9825     llvm_unreachable("Unexpected ABI");
9826   case RISCVABI::ABI_ILP32:
9827   case RISCVABI::ABI_LP64:
9828     break;
9829   case RISCVABI::ABI_ILP32F:
9830   case RISCVABI::ABI_LP64F:
9831     UseGPRForF16_F32 = !IsFixed;
9832     break;
9833   case RISCVABI::ABI_ILP32D:
9834   case RISCVABI::ABI_LP64D:
9835     UseGPRForF16_F32 = !IsFixed;
9836     UseGPRForF64 = !IsFixed;
9837     break;
9838   }
9839 
9840   // FPR16, FPR32, and FPR64 alias each other.
9841   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9842     UseGPRForF16_F32 = true;
9843     UseGPRForF64 = true;
9844   }
9845 
9846   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9847   // similar local variables rather than directly checking against the target
9848   // ABI.
9849 
9850   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9851     LocVT = XLenVT;
9852     LocInfo = CCValAssign::BCvt;
9853   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9854     LocVT = MVT::i64;
9855     LocInfo = CCValAssign::BCvt;
9856   }
9857 
9858   // If this is a variadic argument, the RISC-V calling convention requires
9859   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9860   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9861   // be used regardless of whether the original argument was split during
9862   // legalisation or not. The argument will not be passed by registers if the
9863   // original type is larger than 2*XLEN, so the register alignment rule does
9864   // not apply.
9865   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9866   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9867       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9868     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9869     // Skip 'odd' register if necessary.
9870     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9871       State.AllocateReg(ArgGPRs);
9872   }
9873 
9874   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9875   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9876       State.getPendingArgFlags();
9877 
9878   assert(PendingLocs.size() == PendingArgFlags.size() &&
9879          "PendingLocs and PendingArgFlags out of sync");
9880 
9881   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9882   // registers are exhausted.
9883   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9884     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9885            "Can't lower f64 if it is split");
9886     // Depending on available argument GPRS, f64 may be passed in a pair of
9887     // GPRs, split between a GPR and the stack, or passed completely on the
9888     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9889     // cases.
9890     Register Reg = State.AllocateReg(ArgGPRs);
9891     LocVT = MVT::i32;
9892     if (!Reg) {
9893       unsigned StackOffset = State.AllocateStack(8, Align(8));
9894       State.addLoc(
9895           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9896       return false;
9897     }
9898     if (!State.AllocateReg(ArgGPRs))
9899       State.AllocateStack(4, Align(4));
9900     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9901     return false;
9902   }
9903 
9904   // Fixed-length vectors are located in the corresponding scalable-vector
9905   // container types.
9906   if (ValVT.isFixedLengthVector())
9907     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9908 
9909   // Split arguments might be passed indirectly, so keep track of the pending
9910   // values. Split vectors are passed via a mix of registers and indirectly, so
9911   // treat them as we would any other argument.
9912   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9913     LocVT = XLenVT;
9914     LocInfo = CCValAssign::Indirect;
9915     PendingLocs.push_back(
9916         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9917     PendingArgFlags.push_back(ArgFlags);
9918     if (!ArgFlags.isSplitEnd()) {
9919       return false;
9920     }
9921   }
9922 
9923   // If the split argument only had two elements, it should be passed directly
9924   // in registers or on the stack.
9925   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9926       PendingLocs.size() <= 2) {
9927     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9928     // Apply the normal calling convention rules to the first half of the
9929     // split argument.
9930     CCValAssign VA = PendingLocs[0];
9931     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9932     PendingLocs.clear();
9933     PendingArgFlags.clear();
9934     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9935                                ArgFlags);
9936   }
9937 
9938   // Allocate to a register if possible, or else a stack slot.
9939   Register Reg;
9940   unsigned StoreSizeBytes = XLen / 8;
9941   Align StackAlign = Align(XLen / 8);
9942 
9943   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9944     Reg = State.AllocateReg(ArgFPR16s);
9945   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9946     Reg = State.AllocateReg(ArgFPR32s);
9947   else if (ValVT == MVT::f64 && !UseGPRForF64)
9948     Reg = State.AllocateReg(ArgFPR64s);
9949   else if (ValVT.isVector()) {
9950     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9951     if (!Reg) {
9952       // For return values, the vector must be passed fully via registers or
9953       // via the stack.
9954       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9955       // but we're using all of them.
9956       if (IsRet)
9957         return true;
9958       // Try using a GPR to pass the address
9959       if ((Reg = State.AllocateReg(ArgGPRs))) {
9960         LocVT = XLenVT;
9961         LocInfo = CCValAssign::Indirect;
9962       } else if (ValVT.isScalableVector()) {
9963         LocVT = XLenVT;
9964         LocInfo = CCValAssign::Indirect;
9965       } else {
9966         // Pass fixed-length vectors on the stack.
9967         LocVT = ValVT;
9968         StoreSizeBytes = ValVT.getStoreSize();
9969         // Align vectors to their element sizes, being careful for vXi1
9970         // vectors.
9971         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9972       }
9973     }
9974   } else {
9975     Reg = State.AllocateReg(ArgGPRs);
9976   }
9977 
9978   unsigned StackOffset =
9979       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9980 
9981   // If we reach this point and PendingLocs is non-empty, we must be at the
9982   // end of a split argument that must be passed indirectly.
9983   if (!PendingLocs.empty()) {
9984     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9985     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9986 
9987     for (auto &It : PendingLocs) {
9988       if (Reg)
9989         It.convertToReg(Reg);
9990       else
9991         It.convertToMem(StackOffset);
9992       State.addLoc(It);
9993     }
9994     PendingLocs.clear();
9995     PendingArgFlags.clear();
9996     return false;
9997   }
9998 
9999   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
10000           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
10001          "Expected an XLenVT or vector types at this stage");
10002 
10003   if (Reg) {
10004     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10005     return false;
10006   }
10007 
10008   // When a floating-point value is passed on the stack, no bit-conversion is
10009   // needed.
10010   if (ValVT.isFloatingPoint()) {
10011     LocVT = ValVT;
10012     LocInfo = CCValAssign::Full;
10013   }
10014   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10015   return false;
10016 }
10017 
10018 template <typename ArgTy>
10019 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
10020   for (const auto &ArgIdx : enumerate(Args)) {
10021     MVT ArgVT = ArgIdx.value().VT;
10022     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
10023       return ArgIdx.index();
10024   }
10025   return None;
10026 }
10027 
10028 void RISCVTargetLowering::analyzeInputArgs(
10029     MachineFunction &MF, CCState &CCInfo,
10030     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
10031     RISCVCCAssignFn Fn) const {
10032   unsigned NumArgs = Ins.size();
10033   FunctionType *FType = MF.getFunction().getFunctionType();
10034 
10035   Optional<unsigned> FirstMaskArgument;
10036   if (Subtarget.hasVInstructions())
10037     FirstMaskArgument = preAssignMask(Ins);
10038 
10039   for (unsigned i = 0; i != NumArgs; ++i) {
10040     MVT ArgVT = Ins[i].VT;
10041     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10042 
10043     Type *ArgTy = nullptr;
10044     if (IsRet)
10045       ArgTy = FType->getReturnType();
10046     else if (Ins[i].isOrigArg())
10047       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10048 
10049     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10050     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10051            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10052            FirstMaskArgument)) {
10053       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10054                         << EVT(ArgVT).getEVTString() << '\n');
10055       llvm_unreachable(nullptr);
10056     }
10057   }
10058 }
10059 
10060 void RISCVTargetLowering::analyzeOutputArgs(
10061     MachineFunction &MF, CCState &CCInfo,
10062     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10063     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10064   unsigned NumArgs = Outs.size();
10065 
10066   Optional<unsigned> FirstMaskArgument;
10067   if (Subtarget.hasVInstructions())
10068     FirstMaskArgument = preAssignMask(Outs);
10069 
10070   for (unsigned i = 0; i != NumArgs; i++) {
10071     MVT ArgVT = Outs[i].VT;
10072     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10073     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10074 
10075     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10076     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10077            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10078            FirstMaskArgument)) {
10079       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10080                         << EVT(ArgVT).getEVTString() << "\n");
10081       llvm_unreachable(nullptr);
10082     }
10083   }
10084 }
10085 
10086 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
10087 // values.
10088 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10089                                    const CCValAssign &VA, const SDLoc &DL,
10090                                    const RISCVSubtarget &Subtarget) {
10091   switch (VA.getLocInfo()) {
10092   default:
10093     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10094   case CCValAssign::Full:
10095     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10096       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10097     break;
10098   case CCValAssign::BCvt:
10099     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10100       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10101     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10102       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10103     else
10104       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10105     break;
10106   }
10107   return Val;
10108 }
10109 
10110 // The caller is responsible for loading the full value if the argument is
10111 // passed with CCValAssign::Indirect.
10112 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10113                                 const CCValAssign &VA, const SDLoc &DL,
10114                                 const RISCVTargetLowering &TLI) {
10115   MachineFunction &MF = DAG.getMachineFunction();
10116   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10117   EVT LocVT = VA.getLocVT();
10118   SDValue Val;
10119   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10120   Register VReg = RegInfo.createVirtualRegister(RC);
10121   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10122   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10123 
10124   if (VA.getLocInfo() == CCValAssign::Indirect)
10125     return Val;
10126 
10127   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10128 }
10129 
10130 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10131                                    const CCValAssign &VA, const SDLoc &DL,
10132                                    const RISCVSubtarget &Subtarget) {
10133   EVT LocVT = VA.getLocVT();
10134 
10135   switch (VA.getLocInfo()) {
10136   default:
10137     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10138   case CCValAssign::Full:
10139     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10140       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10141     break;
10142   case CCValAssign::BCvt:
10143     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10144       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10145     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10146       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10147     else
10148       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10149     break;
10150   }
10151   return Val;
10152 }
10153 
10154 // The caller is responsible for loading the full value if the argument is
10155 // passed with CCValAssign::Indirect.
10156 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10157                                 const CCValAssign &VA, const SDLoc &DL) {
10158   MachineFunction &MF = DAG.getMachineFunction();
10159   MachineFrameInfo &MFI = MF.getFrameInfo();
10160   EVT LocVT = VA.getLocVT();
10161   EVT ValVT = VA.getValVT();
10162   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10163   if (ValVT.isScalableVector()) {
10164     // When the value is a scalable vector, we save the pointer which points to
10165     // the scalable vector value in the stack. The ValVT will be the pointer
10166     // type, instead of the scalable vector type.
10167     ValVT = LocVT;
10168   }
10169   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10170                                  /*IsImmutable=*/true);
10171   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10172   SDValue Val;
10173 
10174   ISD::LoadExtType ExtType;
10175   switch (VA.getLocInfo()) {
10176   default:
10177     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10178   case CCValAssign::Full:
10179   case CCValAssign::Indirect:
10180   case CCValAssign::BCvt:
10181     ExtType = ISD::NON_EXTLOAD;
10182     break;
10183   }
10184   Val = DAG.getExtLoad(
10185       ExtType, DL, LocVT, Chain, FIN,
10186       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10187   return Val;
10188 }
10189 
10190 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10191                                        const CCValAssign &VA, const SDLoc &DL) {
10192   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10193          "Unexpected VA");
10194   MachineFunction &MF = DAG.getMachineFunction();
10195   MachineFrameInfo &MFI = MF.getFrameInfo();
10196   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10197 
10198   if (VA.isMemLoc()) {
10199     // f64 is passed on the stack.
10200     int FI =
10201         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10202     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10203     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10204                        MachinePointerInfo::getFixedStack(MF, FI));
10205   }
10206 
10207   assert(VA.isRegLoc() && "Expected register VA assignment");
10208 
10209   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10210   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10211   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10212   SDValue Hi;
10213   if (VA.getLocReg() == RISCV::X17) {
10214     // Second half of f64 is passed on the stack.
10215     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10216     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10217     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10218                      MachinePointerInfo::getFixedStack(MF, FI));
10219   } else {
10220     // Second half of f64 is passed in another GPR.
10221     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10222     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10223     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10224   }
10225   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10226 }
10227 
10228 // FastCC has less than 1% performance improvement for some particular
10229 // benchmark. But theoretically, it may has benenfit for some cases.
10230 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10231                             unsigned ValNo, MVT ValVT, MVT LocVT,
10232                             CCValAssign::LocInfo LocInfo,
10233                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10234                             bool IsFixed, bool IsRet, Type *OrigTy,
10235                             const RISCVTargetLowering &TLI,
10236                             Optional<unsigned> FirstMaskArgument) {
10237 
10238   // X5 and X6 might be used for save-restore libcall.
10239   static const MCPhysReg GPRList[] = {
10240       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10241       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10242       RISCV::X29, RISCV::X30, RISCV::X31};
10243 
10244   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10245     if (unsigned Reg = State.AllocateReg(GPRList)) {
10246       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10247       return false;
10248     }
10249   }
10250 
10251   if (LocVT == MVT::f16) {
10252     static const MCPhysReg FPR16List[] = {
10253         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10254         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10255         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10256         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10257     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10258       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10259       return false;
10260     }
10261   }
10262 
10263   if (LocVT == MVT::f32) {
10264     static const MCPhysReg FPR32List[] = {
10265         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10266         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10267         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10268         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10269     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10270       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10271       return false;
10272     }
10273   }
10274 
10275   if (LocVT == MVT::f64) {
10276     static const MCPhysReg FPR64List[] = {
10277         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10278         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10279         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10280         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10281     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10282       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10283       return false;
10284     }
10285   }
10286 
10287   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10288     unsigned Offset4 = State.AllocateStack(4, Align(4));
10289     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10290     return false;
10291   }
10292 
10293   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10294     unsigned Offset5 = State.AllocateStack(8, Align(8));
10295     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10296     return false;
10297   }
10298 
10299   if (LocVT.isVector()) {
10300     if (unsigned Reg =
10301             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10302       // Fixed-length vectors are located in the corresponding scalable-vector
10303       // container types.
10304       if (ValVT.isFixedLengthVector())
10305         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10306       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10307     } else {
10308       // Try and pass the address via a "fast" GPR.
10309       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10310         LocInfo = CCValAssign::Indirect;
10311         LocVT = TLI.getSubtarget().getXLenVT();
10312         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10313       } else if (ValVT.isFixedLengthVector()) {
10314         auto StackAlign =
10315             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10316         unsigned StackOffset =
10317             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10318         State.addLoc(
10319             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10320       } else {
10321         // Can't pass scalable vectors on the stack.
10322         return true;
10323       }
10324     }
10325 
10326     return false;
10327   }
10328 
10329   return true; // CC didn't match.
10330 }
10331 
10332 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10333                          CCValAssign::LocInfo LocInfo,
10334                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10335 
10336   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10337     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10338     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10339     static const MCPhysReg GPRList[] = {
10340         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10341         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10342     if (unsigned Reg = State.AllocateReg(GPRList)) {
10343       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10344       return false;
10345     }
10346   }
10347 
10348   if (LocVT == MVT::f32) {
10349     // Pass in STG registers: F1, ..., F6
10350     //                        fs0 ... fs5
10351     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10352                                           RISCV::F18_F, RISCV::F19_F,
10353                                           RISCV::F20_F, RISCV::F21_F};
10354     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10355       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10356       return false;
10357     }
10358   }
10359 
10360   if (LocVT == MVT::f64) {
10361     // Pass in STG registers: D1, ..., D6
10362     //                        fs6 ... fs11
10363     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10364                                           RISCV::F24_D, RISCV::F25_D,
10365                                           RISCV::F26_D, RISCV::F27_D};
10366     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10367       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10368       return false;
10369     }
10370   }
10371 
10372   report_fatal_error("No registers left in GHC calling convention");
10373   return true;
10374 }
10375 
10376 // Transform physical registers into virtual registers.
10377 SDValue RISCVTargetLowering::LowerFormalArguments(
10378     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10379     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10380     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10381 
10382   MachineFunction &MF = DAG.getMachineFunction();
10383 
10384   switch (CallConv) {
10385   default:
10386     report_fatal_error("Unsupported calling convention");
10387   case CallingConv::C:
10388   case CallingConv::Fast:
10389     break;
10390   case CallingConv::GHC:
10391     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10392         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10393       report_fatal_error(
10394         "GHC calling convention requires the F and D instruction set extensions");
10395   }
10396 
10397   const Function &Func = MF.getFunction();
10398   if (Func.hasFnAttribute("interrupt")) {
10399     if (!Func.arg_empty())
10400       report_fatal_error(
10401         "Functions with the interrupt attribute cannot have arguments!");
10402 
10403     StringRef Kind =
10404       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10405 
10406     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10407       report_fatal_error(
10408         "Function interrupt attribute argument not supported!");
10409   }
10410 
10411   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10412   MVT XLenVT = Subtarget.getXLenVT();
10413   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10414   // Used with vargs to acumulate store chains.
10415   std::vector<SDValue> OutChains;
10416 
10417   // Assign locations to all of the incoming arguments.
10418   SmallVector<CCValAssign, 16> ArgLocs;
10419   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10420 
10421   if (CallConv == CallingConv::GHC)
10422     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10423   else
10424     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10425                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10426                                                    : CC_RISCV);
10427 
10428   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10429     CCValAssign &VA = ArgLocs[i];
10430     SDValue ArgValue;
10431     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10432     // case.
10433     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10434       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10435     else if (VA.isRegLoc())
10436       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10437     else
10438       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10439 
10440     if (VA.getLocInfo() == CCValAssign::Indirect) {
10441       // If the original argument was split and passed by reference (e.g. i128
10442       // on RV32), we need to load all parts of it here (using the same
10443       // address). Vectors may be partly split to registers and partly to the
10444       // stack, in which case the base address is partly offset and subsequent
10445       // stores are relative to that.
10446       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10447                                    MachinePointerInfo()));
10448       unsigned ArgIndex = Ins[i].OrigArgIndex;
10449       unsigned ArgPartOffset = Ins[i].PartOffset;
10450       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10451       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10452         CCValAssign &PartVA = ArgLocs[i + 1];
10453         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10454         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10455         if (PartVA.getValVT().isScalableVector())
10456           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10457         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10458         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10459                                      MachinePointerInfo()));
10460         ++i;
10461       }
10462       continue;
10463     }
10464     InVals.push_back(ArgValue);
10465   }
10466 
10467   if (IsVarArg) {
10468     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10469     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10470     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10471     MachineFrameInfo &MFI = MF.getFrameInfo();
10472     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10473     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10474 
10475     // Offset of the first variable argument from stack pointer, and size of
10476     // the vararg save area. For now, the varargs save area is either zero or
10477     // large enough to hold a0-a7.
10478     int VaArgOffset, VarArgsSaveSize;
10479 
10480     // If all registers are allocated, then all varargs must be passed on the
10481     // stack and we don't need to save any argregs.
10482     if (ArgRegs.size() == Idx) {
10483       VaArgOffset = CCInfo.getNextStackOffset();
10484       VarArgsSaveSize = 0;
10485     } else {
10486       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10487       VaArgOffset = -VarArgsSaveSize;
10488     }
10489 
10490     // Record the frame index of the first variable argument
10491     // which is a value necessary to VASTART.
10492     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10493     RVFI->setVarArgsFrameIndex(FI);
10494 
10495     // If saving an odd number of registers then create an extra stack slot to
10496     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10497     // offsets to even-numbered registered remain 2*XLEN-aligned.
10498     if (Idx % 2) {
10499       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10500       VarArgsSaveSize += XLenInBytes;
10501     }
10502 
10503     // Copy the integer registers that may have been used for passing varargs
10504     // to the vararg save area.
10505     for (unsigned I = Idx; I < ArgRegs.size();
10506          ++I, VaArgOffset += XLenInBytes) {
10507       const Register Reg = RegInfo.createVirtualRegister(RC);
10508       RegInfo.addLiveIn(ArgRegs[I], Reg);
10509       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10510       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10511       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10512       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10513                                    MachinePointerInfo::getFixedStack(MF, FI));
10514       cast<StoreSDNode>(Store.getNode())
10515           ->getMemOperand()
10516           ->setValue((Value *)nullptr);
10517       OutChains.push_back(Store);
10518     }
10519     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10520   }
10521 
10522   // All stores are grouped in one node to allow the matching between
10523   // the size of Ins and InVals. This only happens for vararg functions.
10524   if (!OutChains.empty()) {
10525     OutChains.push_back(Chain);
10526     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10527   }
10528 
10529   return Chain;
10530 }
10531 
10532 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10533 /// for tail call optimization.
10534 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10535 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10536     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10537     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10538 
10539   auto &Callee = CLI.Callee;
10540   auto CalleeCC = CLI.CallConv;
10541   auto &Outs = CLI.Outs;
10542   auto &Caller = MF.getFunction();
10543   auto CallerCC = Caller.getCallingConv();
10544 
10545   // Exception-handling functions need a special set of instructions to
10546   // indicate a return to the hardware. Tail-calling another function would
10547   // probably break this.
10548   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10549   // should be expanded as new function attributes are introduced.
10550   if (Caller.hasFnAttribute("interrupt"))
10551     return false;
10552 
10553   // Do not tail call opt if the stack is used to pass parameters.
10554   if (CCInfo.getNextStackOffset() != 0)
10555     return false;
10556 
10557   // Do not tail call opt if any parameters need to be passed indirectly.
10558   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10559   // passed indirectly. So the address of the value will be passed in a
10560   // register, or if not available, then the address is put on the stack. In
10561   // order to pass indirectly, space on the stack often needs to be allocated
10562   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10563   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10564   // are passed CCValAssign::Indirect.
10565   for (auto &VA : ArgLocs)
10566     if (VA.getLocInfo() == CCValAssign::Indirect)
10567       return false;
10568 
10569   // Do not tail call opt if either caller or callee uses struct return
10570   // semantics.
10571   auto IsCallerStructRet = Caller.hasStructRetAttr();
10572   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10573   if (IsCallerStructRet || IsCalleeStructRet)
10574     return false;
10575 
10576   // Externally-defined functions with weak linkage should not be
10577   // tail-called. The behaviour of branch instructions in this situation (as
10578   // used for tail calls) is implementation-defined, so we cannot rely on the
10579   // linker replacing the tail call with a return.
10580   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10581     const GlobalValue *GV = G->getGlobal();
10582     if (GV->hasExternalWeakLinkage())
10583       return false;
10584   }
10585 
10586   // The callee has to preserve all registers the caller needs to preserve.
10587   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10588   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10589   if (CalleeCC != CallerCC) {
10590     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10591     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10592       return false;
10593   }
10594 
10595   // Byval parameters hand the function a pointer directly into the stack area
10596   // we want to reuse during a tail call. Working around this *is* possible
10597   // but less efficient and uglier in LowerCall.
10598   for (auto &Arg : Outs)
10599     if (Arg.Flags.isByVal())
10600       return false;
10601 
10602   return true;
10603 }
10604 
10605 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10606   return DAG.getDataLayout().getPrefTypeAlign(
10607       VT.getTypeForEVT(*DAG.getContext()));
10608 }
10609 
10610 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10611 // and output parameter nodes.
10612 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10613                                        SmallVectorImpl<SDValue> &InVals) const {
10614   SelectionDAG &DAG = CLI.DAG;
10615   SDLoc &DL = CLI.DL;
10616   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10617   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10618   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10619   SDValue Chain = CLI.Chain;
10620   SDValue Callee = CLI.Callee;
10621   bool &IsTailCall = CLI.IsTailCall;
10622   CallingConv::ID CallConv = CLI.CallConv;
10623   bool IsVarArg = CLI.IsVarArg;
10624   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10625   MVT XLenVT = Subtarget.getXLenVT();
10626 
10627   MachineFunction &MF = DAG.getMachineFunction();
10628 
10629   // Analyze the operands of the call, assigning locations to each operand.
10630   SmallVector<CCValAssign, 16> ArgLocs;
10631   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10632 
10633   if (CallConv == CallingConv::GHC)
10634     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10635   else
10636     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10637                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10638                                                     : CC_RISCV);
10639 
10640   // Check if it's really possible to do a tail call.
10641   if (IsTailCall)
10642     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10643 
10644   if (IsTailCall)
10645     ++NumTailCalls;
10646   else if (CLI.CB && CLI.CB->isMustTailCall())
10647     report_fatal_error("failed to perform tail call elimination on a call "
10648                        "site marked musttail");
10649 
10650   // Get a count of how many bytes are to be pushed on the stack.
10651   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10652 
10653   // Create local copies for byval args
10654   SmallVector<SDValue, 8> ByValArgs;
10655   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10656     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10657     if (!Flags.isByVal())
10658       continue;
10659 
10660     SDValue Arg = OutVals[i];
10661     unsigned Size = Flags.getByValSize();
10662     Align Alignment = Flags.getNonZeroByValAlign();
10663 
10664     int FI =
10665         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10666     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10667     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10668 
10669     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10670                           /*IsVolatile=*/false,
10671                           /*AlwaysInline=*/false, IsTailCall,
10672                           MachinePointerInfo(), MachinePointerInfo());
10673     ByValArgs.push_back(FIPtr);
10674   }
10675 
10676   if (!IsTailCall)
10677     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10678 
10679   // Copy argument values to their designated locations.
10680   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10681   SmallVector<SDValue, 8> MemOpChains;
10682   SDValue StackPtr;
10683   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10684     CCValAssign &VA = ArgLocs[i];
10685     SDValue ArgValue = OutVals[i];
10686     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10687 
10688     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10689     bool IsF64OnRV32DSoftABI =
10690         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10691     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10692       SDValue SplitF64 = DAG.getNode(
10693           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10694       SDValue Lo = SplitF64.getValue(0);
10695       SDValue Hi = SplitF64.getValue(1);
10696 
10697       Register RegLo = VA.getLocReg();
10698       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10699 
10700       if (RegLo == RISCV::X17) {
10701         // Second half of f64 is passed on the stack.
10702         // Work out the address of the stack slot.
10703         if (!StackPtr.getNode())
10704           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10705         // Emit the store.
10706         MemOpChains.push_back(
10707             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10708       } else {
10709         // Second half of f64 is passed in another GPR.
10710         assert(RegLo < RISCV::X31 && "Invalid register pair");
10711         Register RegHigh = RegLo + 1;
10712         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10713       }
10714       continue;
10715     }
10716 
10717     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10718     // as any other MemLoc.
10719 
10720     // Promote the value if needed.
10721     // For now, only handle fully promoted and indirect arguments.
10722     if (VA.getLocInfo() == CCValAssign::Indirect) {
10723       // Store the argument in a stack slot and pass its address.
10724       Align StackAlign =
10725           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10726                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10727       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10728       // If the original argument was split (e.g. i128), we need
10729       // to store the required parts of it here (and pass just one address).
10730       // Vectors may be partly split to registers and partly to the stack, in
10731       // which case the base address is partly offset and subsequent stores are
10732       // relative to that.
10733       unsigned ArgIndex = Outs[i].OrigArgIndex;
10734       unsigned ArgPartOffset = Outs[i].PartOffset;
10735       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10736       // Calculate the total size to store. We don't have access to what we're
10737       // actually storing other than performing the loop and collecting the
10738       // info.
10739       SmallVector<std::pair<SDValue, SDValue>> Parts;
10740       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10741         SDValue PartValue = OutVals[i + 1];
10742         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10743         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10744         EVT PartVT = PartValue.getValueType();
10745         if (PartVT.isScalableVector())
10746           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10747         StoredSize += PartVT.getStoreSize();
10748         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10749         Parts.push_back(std::make_pair(PartValue, Offset));
10750         ++i;
10751       }
10752       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10753       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10754       MemOpChains.push_back(
10755           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10756                        MachinePointerInfo::getFixedStack(MF, FI)));
10757       for (const auto &Part : Parts) {
10758         SDValue PartValue = Part.first;
10759         SDValue PartOffset = Part.second;
10760         SDValue Address =
10761             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10762         MemOpChains.push_back(
10763             DAG.getStore(Chain, DL, PartValue, Address,
10764                          MachinePointerInfo::getFixedStack(MF, FI)));
10765       }
10766       ArgValue = SpillSlot;
10767     } else {
10768       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10769     }
10770 
10771     // Use local copy if it is a byval arg.
10772     if (Flags.isByVal())
10773       ArgValue = ByValArgs[j++];
10774 
10775     if (VA.isRegLoc()) {
10776       // Queue up the argument copies and emit them at the end.
10777       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10778     } else {
10779       assert(VA.isMemLoc() && "Argument not register or memory");
10780       assert(!IsTailCall && "Tail call not allowed if stack is used "
10781                             "for passing parameters");
10782 
10783       // Work out the address of the stack slot.
10784       if (!StackPtr.getNode())
10785         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10786       SDValue Address =
10787           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10788                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10789 
10790       // Emit the store.
10791       MemOpChains.push_back(
10792           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10793     }
10794   }
10795 
10796   // Join the stores, which are independent of one another.
10797   if (!MemOpChains.empty())
10798     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10799 
10800   SDValue Glue;
10801 
10802   // Build a sequence of copy-to-reg nodes, chained and glued together.
10803   for (auto &Reg : RegsToPass) {
10804     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10805     Glue = Chain.getValue(1);
10806   }
10807 
10808   // Validate that none of the argument registers have been marked as
10809   // reserved, if so report an error. Do the same for the return address if this
10810   // is not a tailcall.
10811   validateCCReservedRegs(RegsToPass, MF);
10812   if (!IsTailCall &&
10813       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10814     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10815         MF.getFunction(),
10816         "Return address register required, but has been reserved."});
10817 
10818   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10819   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10820   // split it and then direct call can be matched by PseudoCALL.
10821   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10822     const GlobalValue *GV = S->getGlobal();
10823 
10824     unsigned OpFlags = RISCVII::MO_CALL;
10825     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10826       OpFlags = RISCVII::MO_PLT;
10827 
10828     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10829   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10830     unsigned OpFlags = RISCVII::MO_CALL;
10831 
10832     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10833                                                  nullptr))
10834       OpFlags = RISCVII::MO_PLT;
10835 
10836     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10837   }
10838 
10839   // The first call operand is the chain and the second is the target address.
10840   SmallVector<SDValue, 8> Ops;
10841   Ops.push_back(Chain);
10842   Ops.push_back(Callee);
10843 
10844   // Add argument registers to the end of the list so that they are
10845   // known live into the call.
10846   for (auto &Reg : RegsToPass)
10847     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10848 
10849   if (!IsTailCall) {
10850     // Add a register mask operand representing the call-preserved registers.
10851     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10852     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10853     assert(Mask && "Missing call preserved mask for calling convention");
10854     Ops.push_back(DAG.getRegisterMask(Mask));
10855   }
10856 
10857   // Glue the call to the argument copies, if any.
10858   if (Glue.getNode())
10859     Ops.push_back(Glue);
10860 
10861   // Emit the call.
10862   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10863 
10864   if (IsTailCall) {
10865     MF.getFrameInfo().setHasTailCall();
10866     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10867   }
10868 
10869   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10870   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10871   Glue = Chain.getValue(1);
10872 
10873   // Mark the end of the call, which is glued to the call itself.
10874   Chain = DAG.getCALLSEQ_END(Chain,
10875                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10876                              DAG.getConstant(0, DL, PtrVT, true),
10877                              Glue, DL);
10878   Glue = Chain.getValue(1);
10879 
10880   // Assign locations to each value returned by this call.
10881   SmallVector<CCValAssign, 16> RVLocs;
10882   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10883   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10884 
10885   // Copy all of the result registers out of their specified physreg.
10886   for (auto &VA : RVLocs) {
10887     // Copy the value out
10888     SDValue RetValue =
10889         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10890     // Glue the RetValue to the end of the call sequence
10891     Chain = RetValue.getValue(1);
10892     Glue = RetValue.getValue(2);
10893 
10894     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10895       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10896       SDValue RetValue2 =
10897           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10898       Chain = RetValue2.getValue(1);
10899       Glue = RetValue2.getValue(2);
10900       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10901                              RetValue2);
10902     }
10903 
10904     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10905 
10906     InVals.push_back(RetValue);
10907   }
10908 
10909   return Chain;
10910 }
10911 
10912 bool RISCVTargetLowering::CanLowerReturn(
10913     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10914     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10915   SmallVector<CCValAssign, 16> RVLocs;
10916   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10917 
10918   Optional<unsigned> FirstMaskArgument;
10919   if (Subtarget.hasVInstructions())
10920     FirstMaskArgument = preAssignMask(Outs);
10921 
10922   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10923     MVT VT = Outs[i].VT;
10924     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10925     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10926     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10927                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10928                  *this, FirstMaskArgument))
10929       return false;
10930   }
10931   return true;
10932 }
10933 
10934 SDValue
10935 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10936                                  bool IsVarArg,
10937                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10938                                  const SmallVectorImpl<SDValue> &OutVals,
10939                                  const SDLoc &DL, SelectionDAG &DAG) const {
10940   const MachineFunction &MF = DAG.getMachineFunction();
10941   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10942 
10943   // Stores the assignment of the return value to a location.
10944   SmallVector<CCValAssign, 16> RVLocs;
10945 
10946   // Info about the registers and stack slot.
10947   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10948                  *DAG.getContext());
10949 
10950   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10951                     nullptr, CC_RISCV);
10952 
10953   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10954     report_fatal_error("GHC functions return void only");
10955 
10956   SDValue Glue;
10957   SmallVector<SDValue, 4> RetOps(1, Chain);
10958 
10959   // Copy the result values into the output registers.
10960   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10961     SDValue Val = OutVals[i];
10962     CCValAssign &VA = RVLocs[i];
10963     assert(VA.isRegLoc() && "Can only return in registers!");
10964 
10965     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10966       // Handle returning f64 on RV32D with a soft float ABI.
10967       assert(VA.isRegLoc() && "Expected return via registers");
10968       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10969                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10970       SDValue Lo = SplitF64.getValue(0);
10971       SDValue Hi = SplitF64.getValue(1);
10972       Register RegLo = VA.getLocReg();
10973       assert(RegLo < RISCV::X31 && "Invalid register pair");
10974       Register RegHi = RegLo + 1;
10975 
10976       if (STI.isRegisterReservedByUser(RegLo) ||
10977           STI.isRegisterReservedByUser(RegHi))
10978         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10979             MF.getFunction(),
10980             "Return value register required, but has been reserved."});
10981 
10982       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10983       Glue = Chain.getValue(1);
10984       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10985       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10986       Glue = Chain.getValue(1);
10987       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10988     } else {
10989       // Handle a 'normal' return.
10990       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10991       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10992 
10993       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10994         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10995             MF.getFunction(),
10996             "Return value register required, but has been reserved."});
10997 
10998       // Guarantee that all emitted copies are stuck together.
10999       Glue = Chain.getValue(1);
11000       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
11001     }
11002   }
11003 
11004   RetOps[0] = Chain; // Update chain.
11005 
11006   // Add the glue node if we have it.
11007   if (Glue.getNode()) {
11008     RetOps.push_back(Glue);
11009   }
11010 
11011   unsigned RetOpc = RISCVISD::RET_FLAG;
11012   // Interrupt service routines use different return instructions.
11013   const Function &Func = DAG.getMachineFunction().getFunction();
11014   if (Func.hasFnAttribute("interrupt")) {
11015     if (!Func.getReturnType()->isVoidTy())
11016       report_fatal_error(
11017           "Functions with the interrupt attribute must have void return type!");
11018 
11019     MachineFunction &MF = DAG.getMachineFunction();
11020     StringRef Kind =
11021       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
11022 
11023     if (Kind == "user")
11024       RetOpc = RISCVISD::URET_FLAG;
11025     else if (Kind == "supervisor")
11026       RetOpc = RISCVISD::SRET_FLAG;
11027     else
11028       RetOpc = RISCVISD::MRET_FLAG;
11029   }
11030 
11031   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
11032 }
11033 
11034 void RISCVTargetLowering::validateCCReservedRegs(
11035     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11036     MachineFunction &MF) const {
11037   const Function &F = MF.getFunction();
11038   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11039 
11040   if (llvm::any_of(Regs, [&STI](auto Reg) {
11041         return STI.isRegisterReservedByUser(Reg.first);
11042       }))
11043     F.getContext().diagnose(DiagnosticInfoUnsupported{
11044         F, "Argument register required, but has been reserved."});
11045 }
11046 
11047 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11048   return CI->isTailCall();
11049 }
11050 
11051 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11052 #define NODE_NAME_CASE(NODE)                                                   \
11053   case RISCVISD::NODE:                                                         \
11054     return "RISCVISD::" #NODE;
11055   // clang-format off
11056   switch ((RISCVISD::NodeType)Opcode) {
11057   case RISCVISD::FIRST_NUMBER:
11058     break;
11059   NODE_NAME_CASE(RET_FLAG)
11060   NODE_NAME_CASE(URET_FLAG)
11061   NODE_NAME_CASE(SRET_FLAG)
11062   NODE_NAME_CASE(MRET_FLAG)
11063   NODE_NAME_CASE(CALL)
11064   NODE_NAME_CASE(SELECT_CC)
11065   NODE_NAME_CASE(BR_CC)
11066   NODE_NAME_CASE(BuildPairF64)
11067   NODE_NAME_CASE(SplitF64)
11068   NODE_NAME_CASE(TAIL)
11069   NODE_NAME_CASE(MULHSU)
11070   NODE_NAME_CASE(SLLW)
11071   NODE_NAME_CASE(SRAW)
11072   NODE_NAME_CASE(SRLW)
11073   NODE_NAME_CASE(DIVW)
11074   NODE_NAME_CASE(DIVUW)
11075   NODE_NAME_CASE(REMUW)
11076   NODE_NAME_CASE(ROLW)
11077   NODE_NAME_CASE(RORW)
11078   NODE_NAME_CASE(CLZW)
11079   NODE_NAME_CASE(CTZW)
11080   NODE_NAME_CASE(FSLW)
11081   NODE_NAME_CASE(FSRW)
11082   NODE_NAME_CASE(FSL)
11083   NODE_NAME_CASE(FSR)
11084   NODE_NAME_CASE(FMV_H_X)
11085   NODE_NAME_CASE(FMV_X_ANYEXTH)
11086   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11087   NODE_NAME_CASE(FMV_W_X_RV64)
11088   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11089   NODE_NAME_CASE(FCVT_X)
11090   NODE_NAME_CASE(FCVT_XU)
11091   NODE_NAME_CASE(FCVT_W_RV64)
11092   NODE_NAME_CASE(FCVT_WU_RV64)
11093   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11094   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11095   NODE_NAME_CASE(READ_CYCLE_WIDE)
11096   NODE_NAME_CASE(GREV)
11097   NODE_NAME_CASE(GREVW)
11098   NODE_NAME_CASE(GORC)
11099   NODE_NAME_CASE(GORCW)
11100   NODE_NAME_CASE(SHFL)
11101   NODE_NAME_CASE(SHFLW)
11102   NODE_NAME_CASE(UNSHFL)
11103   NODE_NAME_CASE(UNSHFLW)
11104   NODE_NAME_CASE(BFP)
11105   NODE_NAME_CASE(BFPW)
11106   NODE_NAME_CASE(BCOMPRESS)
11107   NODE_NAME_CASE(BCOMPRESSW)
11108   NODE_NAME_CASE(BDECOMPRESS)
11109   NODE_NAME_CASE(BDECOMPRESSW)
11110   NODE_NAME_CASE(VMV_V_X_VL)
11111   NODE_NAME_CASE(VFMV_V_F_VL)
11112   NODE_NAME_CASE(VMV_X_S)
11113   NODE_NAME_CASE(VMV_S_X_VL)
11114   NODE_NAME_CASE(VFMV_S_F_VL)
11115   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11116   NODE_NAME_CASE(READ_VLENB)
11117   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11118   NODE_NAME_CASE(VSLIDEUP_VL)
11119   NODE_NAME_CASE(VSLIDE1UP_VL)
11120   NODE_NAME_CASE(VSLIDEDOWN_VL)
11121   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11122   NODE_NAME_CASE(VID_VL)
11123   NODE_NAME_CASE(VFNCVT_ROD_VL)
11124   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11125   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11126   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11127   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11128   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11129   NODE_NAME_CASE(VECREDUCE_AND_VL)
11130   NODE_NAME_CASE(VECREDUCE_OR_VL)
11131   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11132   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11133   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11134   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11135   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11136   NODE_NAME_CASE(ADD_VL)
11137   NODE_NAME_CASE(AND_VL)
11138   NODE_NAME_CASE(MUL_VL)
11139   NODE_NAME_CASE(OR_VL)
11140   NODE_NAME_CASE(SDIV_VL)
11141   NODE_NAME_CASE(SHL_VL)
11142   NODE_NAME_CASE(SREM_VL)
11143   NODE_NAME_CASE(SRA_VL)
11144   NODE_NAME_CASE(SRL_VL)
11145   NODE_NAME_CASE(SUB_VL)
11146   NODE_NAME_CASE(UDIV_VL)
11147   NODE_NAME_CASE(UREM_VL)
11148   NODE_NAME_CASE(XOR_VL)
11149   NODE_NAME_CASE(SADDSAT_VL)
11150   NODE_NAME_CASE(UADDSAT_VL)
11151   NODE_NAME_CASE(SSUBSAT_VL)
11152   NODE_NAME_CASE(USUBSAT_VL)
11153   NODE_NAME_CASE(FADD_VL)
11154   NODE_NAME_CASE(FSUB_VL)
11155   NODE_NAME_CASE(FMUL_VL)
11156   NODE_NAME_CASE(FDIV_VL)
11157   NODE_NAME_CASE(FNEG_VL)
11158   NODE_NAME_CASE(FABS_VL)
11159   NODE_NAME_CASE(FSQRT_VL)
11160   NODE_NAME_CASE(FMA_VL)
11161   NODE_NAME_CASE(FCOPYSIGN_VL)
11162   NODE_NAME_CASE(SMIN_VL)
11163   NODE_NAME_CASE(SMAX_VL)
11164   NODE_NAME_CASE(UMIN_VL)
11165   NODE_NAME_CASE(UMAX_VL)
11166   NODE_NAME_CASE(FMINNUM_VL)
11167   NODE_NAME_CASE(FMAXNUM_VL)
11168   NODE_NAME_CASE(MULHS_VL)
11169   NODE_NAME_CASE(MULHU_VL)
11170   NODE_NAME_CASE(FP_TO_SINT_VL)
11171   NODE_NAME_CASE(FP_TO_UINT_VL)
11172   NODE_NAME_CASE(SINT_TO_FP_VL)
11173   NODE_NAME_CASE(UINT_TO_FP_VL)
11174   NODE_NAME_CASE(FP_EXTEND_VL)
11175   NODE_NAME_CASE(FP_ROUND_VL)
11176   NODE_NAME_CASE(VWMUL_VL)
11177   NODE_NAME_CASE(VWMULU_VL)
11178   NODE_NAME_CASE(VWMULSU_VL)
11179   NODE_NAME_CASE(VWADD_VL)
11180   NODE_NAME_CASE(VWADDU_VL)
11181   NODE_NAME_CASE(VWSUB_VL)
11182   NODE_NAME_CASE(VWSUBU_VL)
11183   NODE_NAME_CASE(VWADD_W_VL)
11184   NODE_NAME_CASE(VWADDU_W_VL)
11185   NODE_NAME_CASE(VWSUB_W_VL)
11186   NODE_NAME_CASE(VWSUBU_W_VL)
11187   NODE_NAME_CASE(SETCC_VL)
11188   NODE_NAME_CASE(VSELECT_VL)
11189   NODE_NAME_CASE(VP_MERGE_VL)
11190   NODE_NAME_CASE(VMAND_VL)
11191   NODE_NAME_CASE(VMOR_VL)
11192   NODE_NAME_CASE(VMXOR_VL)
11193   NODE_NAME_CASE(VMCLR_VL)
11194   NODE_NAME_CASE(VMSET_VL)
11195   NODE_NAME_CASE(VRGATHER_VX_VL)
11196   NODE_NAME_CASE(VRGATHER_VV_VL)
11197   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11198   NODE_NAME_CASE(VSEXT_VL)
11199   NODE_NAME_CASE(VZEXT_VL)
11200   NODE_NAME_CASE(VCPOP_VL)
11201   NODE_NAME_CASE(READ_CSR)
11202   NODE_NAME_CASE(WRITE_CSR)
11203   NODE_NAME_CASE(SWAP_CSR)
11204   }
11205   // clang-format on
11206   return nullptr;
11207 #undef NODE_NAME_CASE
11208 }
11209 
11210 /// getConstraintType - Given a constraint letter, return the type of
11211 /// constraint it is for this target.
11212 RISCVTargetLowering::ConstraintType
11213 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11214   if (Constraint.size() == 1) {
11215     switch (Constraint[0]) {
11216     default:
11217       break;
11218     case 'f':
11219       return C_RegisterClass;
11220     case 'I':
11221     case 'J':
11222     case 'K':
11223       return C_Immediate;
11224     case 'A':
11225       return C_Memory;
11226     case 'S': // A symbolic address
11227       return C_Other;
11228     }
11229   } else {
11230     if (Constraint == "vr" || Constraint == "vm")
11231       return C_RegisterClass;
11232   }
11233   return TargetLowering::getConstraintType(Constraint);
11234 }
11235 
11236 std::pair<unsigned, const TargetRegisterClass *>
11237 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11238                                                   StringRef Constraint,
11239                                                   MVT VT) const {
11240   // First, see if this is a constraint that directly corresponds to a
11241   // RISCV register class.
11242   if (Constraint.size() == 1) {
11243     switch (Constraint[0]) {
11244     case 'r':
11245       // TODO: Support fixed vectors up to XLen for P extension?
11246       if (VT.isVector())
11247         break;
11248       return std::make_pair(0U, &RISCV::GPRRegClass);
11249     case 'f':
11250       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11251         return std::make_pair(0U, &RISCV::FPR16RegClass);
11252       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11253         return std::make_pair(0U, &RISCV::FPR32RegClass);
11254       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11255         return std::make_pair(0U, &RISCV::FPR64RegClass);
11256       break;
11257     default:
11258       break;
11259     }
11260   } else if (Constraint == "vr") {
11261     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11262                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11263       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11264         return std::make_pair(0U, RC);
11265     }
11266   } else if (Constraint == "vm") {
11267     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11268       return std::make_pair(0U, &RISCV::VMV0RegClass);
11269   }
11270 
11271   // Clang will correctly decode the usage of register name aliases into their
11272   // official names. However, other frontends like `rustc` do not. This allows
11273   // users of these frontends to use the ABI names for registers in LLVM-style
11274   // register constraints.
11275   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11276                                .Case("{zero}", RISCV::X0)
11277                                .Case("{ra}", RISCV::X1)
11278                                .Case("{sp}", RISCV::X2)
11279                                .Case("{gp}", RISCV::X3)
11280                                .Case("{tp}", RISCV::X4)
11281                                .Case("{t0}", RISCV::X5)
11282                                .Case("{t1}", RISCV::X6)
11283                                .Case("{t2}", RISCV::X7)
11284                                .Cases("{s0}", "{fp}", RISCV::X8)
11285                                .Case("{s1}", RISCV::X9)
11286                                .Case("{a0}", RISCV::X10)
11287                                .Case("{a1}", RISCV::X11)
11288                                .Case("{a2}", RISCV::X12)
11289                                .Case("{a3}", RISCV::X13)
11290                                .Case("{a4}", RISCV::X14)
11291                                .Case("{a5}", RISCV::X15)
11292                                .Case("{a6}", RISCV::X16)
11293                                .Case("{a7}", RISCV::X17)
11294                                .Case("{s2}", RISCV::X18)
11295                                .Case("{s3}", RISCV::X19)
11296                                .Case("{s4}", RISCV::X20)
11297                                .Case("{s5}", RISCV::X21)
11298                                .Case("{s6}", RISCV::X22)
11299                                .Case("{s7}", RISCV::X23)
11300                                .Case("{s8}", RISCV::X24)
11301                                .Case("{s9}", RISCV::X25)
11302                                .Case("{s10}", RISCV::X26)
11303                                .Case("{s11}", RISCV::X27)
11304                                .Case("{t3}", RISCV::X28)
11305                                .Case("{t4}", RISCV::X29)
11306                                .Case("{t5}", RISCV::X30)
11307                                .Case("{t6}", RISCV::X31)
11308                                .Default(RISCV::NoRegister);
11309   if (XRegFromAlias != RISCV::NoRegister)
11310     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11311 
11312   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11313   // TableGen record rather than the AsmName to choose registers for InlineAsm
11314   // constraints, plus we want to match those names to the widest floating point
11315   // register type available, manually select floating point registers here.
11316   //
11317   // The second case is the ABI name of the register, so that frontends can also
11318   // use the ABI names in register constraint lists.
11319   if (Subtarget.hasStdExtF()) {
11320     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11321                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11322                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11323                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11324                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11325                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11326                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11327                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11328                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11329                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11330                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11331                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11332                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11333                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11334                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11335                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11336                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11337                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11338                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11339                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11340                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11341                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11342                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11343                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11344                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11345                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11346                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11347                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11348                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11349                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11350                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11351                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11352                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11353                         .Default(RISCV::NoRegister);
11354     if (FReg != RISCV::NoRegister) {
11355       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11356       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11357         unsigned RegNo = FReg - RISCV::F0_F;
11358         unsigned DReg = RISCV::F0_D + RegNo;
11359         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11360       }
11361       if (VT == MVT::f32 || VT == MVT::Other)
11362         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11363       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11364         unsigned RegNo = FReg - RISCV::F0_F;
11365         unsigned HReg = RISCV::F0_H + RegNo;
11366         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11367       }
11368     }
11369   }
11370 
11371   if (Subtarget.hasVInstructions()) {
11372     Register VReg = StringSwitch<Register>(Constraint.lower())
11373                         .Case("{v0}", RISCV::V0)
11374                         .Case("{v1}", RISCV::V1)
11375                         .Case("{v2}", RISCV::V2)
11376                         .Case("{v3}", RISCV::V3)
11377                         .Case("{v4}", RISCV::V4)
11378                         .Case("{v5}", RISCV::V5)
11379                         .Case("{v6}", RISCV::V6)
11380                         .Case("{v7}", RISCV::V7)
11381                         .Case("{v8}", RISCV::V8)
11382                         .Case("{v9}", RISCV::V9)
11383                         .Case("{v10}", RISCV::V10)
11384                         .Case("{v11}", RISCV::V11)
11385                         .Case("{v12}", RISCV::V12)
11386                         .Case("{v13}", RISCV::V13)
11387                         .Case("{v14}", RISCV::V14)
11388                         .Case("{v15}", RISCV::V15)
11389                         .Case("{v16}", RISCV::V16)
11390                         .Case("{v17}", RISCV::V17)
11391                         .Case("{v18}", RISCV::V18)
11392                         .Case("{v19}", RISCV::V19)
11393                         .Case("{v20}", RISCV::V20)
11394                         .Case("{v21}", RISCV::V21)
11395                         .Case("{v22}", RISCV::V22)
11396                         .Case("{v23}", RISCV::V23)
11397                         .Case("{v24}", RISCV::V24)
11398                         .Case("{v25}", RISCV::V25)
11399                         .Case("{v26}", RISCV::V26)
11400                         .Case("{v27}", RISCV::V27)
11401                         .Case("{v28}", RISCV::V28)
11402                         .Case("{v29}", RISCV::V29)
11403                         .Case("{v30}", RISCV::V30)
11404                         .Case("{v31}", RISCV::V31)
11405                         .Default(RISCV::NoRegister);
11406     if (VReg != RISCV::NoRegister) {
11407       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11408         return std::make_pair(VReg, &RISCV::VMRegClass);
11409       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11410         return std::make_pair(VReg, &RISCV::VRRegClass);
11411       for (const auto *RC :
11412            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11413         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11414           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11415           return std::make_pair(VReg, RC);
11416         }
11417       }
11418     }
11419   }
11420 
11421   std::pair<Register, const TargetRegisterClass *> Res =
11422       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11423 
11424   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11425   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11426   // Subtarget into account.
11427   if (Res.second == &RISCV::GPRF16RegClass ||
11428       Res.second == &RISCV::GPRF32RegClass ||
11429       Res.second == &RISCV::GPRF64RegClass)
11430     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11431 
11432   return Res;
11433 }
11434 
11435 unsigned
11436 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11437   // Currently only support length 1 constraints.
11438   if (ConstraintCode.size() == 1) {
11439     switch (ConstraintCode[0]) {
11440     case 'A':
11441       return InlineAsm::Constraint_A;
11442     default:
11443       break;
11444     }
11445   }
11446 
11447   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11448 }
11449 
11450 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11451     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11452     SelectionDAG &DAG) const {
11453   // Currently only support length 1 constraints.
11454   if (Constraint.length() == 1) {
11455     switch (Constraint[0]) {
11456     case 'I':
11457       // Validate & create a 12-bit signed immediate operand.
11458       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11459         uint64_t CVal = C->getSExtValue();
11460         if (isInt<12>(CVal))
11461           Ops.push_back(
11462               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11463       }
11464       return;
11465     case 'J':
11466       // Validate & create an integer zero operand.
11467       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11468         if (C->getZExtValue() == 0)
11469           Ops.push_back(
11470               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11471       return;
11472     case 'K':
11473       // Validate & create a 5-bit unsigned immediate operand.
11474       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11475         uint64_t CVal = C->getZExtValue();
11476         if (isUInt<5>(CVal))
11477           Ops.push_back(
11478               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11479       }
11480       return;
11481     case 'S':
11482       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11483         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11484                                                  GA->getValueType(0)));
11485       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11486         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11487                                                 BA->getValueType(0)));
11488       }
11489       return;
11490     default:
11491       break;
11492     }
11493   }
11494   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11495 }
11496 
11497 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11498                                                    Instruction *Inst,
11499                                                    AtomicOrdering Ord) const {
11500   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11501     return Builder.CreateFence(Ord);
11502   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11503     return Builder.CreateFence(AtomicOrdering::Release);
11504   return nullptr;
11505 }
11506 
11507 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11508                                                     Instruction *Inst,
11509                                                     AtomicOrdering Ord) const {
11510   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11511     return Builder.CreateFence(AtomicOrdering::Acquire);
11512   return nullptr;
11513 }
11514 
11515 TargetLowering::AtomicExpansionKind
11516 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11517   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11518   // point operations can't be used in an lr/sc sequence without breaking the
11519   // forward-progress guarantee.
11520   if (AI->isFloatingPointOperation())
11521     return AtomicExpansionKind::CmpXChg;
11522 
11523   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11524   if (Size == 8 || Size == 16)
11525     return AtomicExpansionKind::MaskedIntrinsic;
11526   return AtomicExpansionKind::None;
11527 }
11528 
11529 static Intrinsic::ID
11530 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11531   if (XLen == 32) {
11532     switch (BinOp) {
11533     default:
11534       llvm_unreachable("Unexpected AtomicRMW BinOp");
11535     case AtomicRMWInst::Xchg:
11536       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11537     case AtomicRMWInst::Add:
11538       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11539     case AtomicRMWInst::Sub:
11540       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11541     case AtomicRMWInst::Nand:
11542       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11543     case AtomicRMWInst::Max:
11544       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11545     case AtomicRMWInst::Min:
11546       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11547     case AtomicRMWInst::UMax:
11548       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11549     case AtomicRMWInst::UMin:
11550       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11551     }
11552   }
11553 
11554   if (XLen == 64) {
11555     switch (BinOp) {
11556     default:
11557       llvm_unreachable("Unexpected AtomicRMW BinOp");
11558     case AtomicRMWInst::Xchg:
11559       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11560     case AtomicRMWInst::Add:
11561       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11562     case AtomicRMWInst::Sub:
11563       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11564     case AtomicRMWInst::Nand:
11565       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11566     case AtomicRMWInst::Max:
11567       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11568     case AtomicRMWInst::Min:
11569       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11570     case AtomicRMWInst::UMax:
11571       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11572     case AtomicRMWInst::UMin:
11573       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11574     }
11575   }
11576 
11577   llvm_unreachable("Unexpected XLen\n");
11578 }
11579 
11580 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11581     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11582     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11583   unsigned XLen = Subtarget.getXLen();
11584   Value *Ordering =
11585       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11586   Type *Tys[] = {AlignedAddr->getType()};
11587   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11588       AI->getModule(),
11589       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11590 
11591   if (XLen == 64) {
11592     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11593     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11594     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11595   }
11596 
11597   Value *Result;
11598 
11599   // Must pass the shift amount needed to sign extend the loaded value prior
11600   // to performing a signed comparison for min/max. ShiftAmt is the number of
11601   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11602   // is the number of bits to left+right shift the value in order to
11603   // sign-extend.
11604   if (AI->getOperation() == AtomicRMWInst::Min ||
11605       AI->getOperation() == AtomicRMWInst::Max) {
11606     const DataLayout &DL = AI->getModule()->getDataLayout();
11607     unsigned ValWidth =
11608         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11609     Value *SextShamt =
11610         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11611     Result = Builder.CreateCall(LrwOpScwLoop,
11612                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11613   } else {
11614     Result =
11615         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11616   }
11617 
11618   if (XLen == 64)
11619     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11620   return Result;
11621 }
11622 
11623 TargetLowering::AtomicExpansionKind
11624 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11625     AtomicCmpXchgInst *CI) const {
11626   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11627   if (Size == 8 || Size == 16)
11628     return AtomicExpansionKind::MaskedIntrinsic;
11629   return AtomicExpansionKind::None;
11630 }
11631 
11632 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11633     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11634     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11635   unsigned XLen = Subtarget.getXLen();
11636   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11637   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11638   if (XLen == 64) {
11639     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11640     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11641     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11642     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11643   }
11644   Type *Tys[] = {AlignedAddr->getType()};
11645   Function *MaskedCmpXchg =
11646       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11647   Value *Result = Builder.CreateCall(
11648       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11649   if (XLen == 64)
11650     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11651   return Result;
11652 }
11653 
11654 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11655   return false;
11656 }
11657 
11658 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11659                                                EVT VT) const {
11660   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11661     return false;
11662 
11663   switch (FPVT.getSimpleVT().SimpleTy) {
11664   case MVT::f16:
11665     return Subtarget.hasStdExtZfh();
11666   case MVT::f32:
11667     return Subtarget.hasStdExtF();
11668   case MVT::f64:
11669     return Subtarget.hasStdExtD();
11670   default:
11671     return false;
11672   }
11673 }
11674 
11675 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11676   // If we are using the small code model, we can reduce size of jump table
11677   // entry to 4 bytes.
11678   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11679       getTargetMachine().getCodeModel() == CodeModel::Small) {
11680     return MachineJumpTableInfo::EK_Custom32;
11681   }
11682   return TargetLowering::getJumpTableEncoding();
11683 }
11684 
11685 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11686     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11687     unsigned uid, MCContext &Ctx) const {
11688   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11689          getTargetMachine().getCodeModel() == CodeModel::Small);
11690   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11691 }
11692 
11693 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11694                                                      EVT VT) const {
11695   VT = VT.getScalarType();
11696 
11697   if (!VT.isSimple())
11698     return false;
11699 
11700   switch (VT.getSimpleVT().SimpleTy) {
11701   case MVT::f16:
11702     return Subtarget.hasStdExtZfh();
11703   case MVT::f32:
11704     return Subtarget.hasStdExtF();
11705   case MVT::f64:
11706     return Subtarget.hasStdExtD();
11707   default:
11708     break;
11709   }
11710 
11711   return false;
11712 }
11713 
11714 Register RISCVTargetLowering::getExceptionPointerRegister(
11715     const Constant *PersonalityFn) const {
11716   return RISCV::X10;
11717 }
11718 
11719 Register RISCVTargetLowering::getExceptionSelectorRegister(
11720     const Constant *PersonalityFn) const {
11721   return RISCV::X11;
11722 }
11723 
11724 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11725   // Return false to suppress the unnecessary extensions if the LibCall
11726   // arguments or return value is f32 type for LP64 ABI.
11727   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11728   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11729     return false;
11730 
11731   return true;
11732 }
11733 
11734 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11735   if (Subtarget.is64Bit() && Type == MVT::i32)
11736     return true;
11737 
11738   return IsSigned;
11739 }
11740 
11741 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11742                                                  SDValue C) const {
11743   // Check integral scalar types.
11744   if (VT.isScalarInteger()) {
11745     // Omit the optimization if the sub target has the M extension and the data
11746     // size exceeds XLen.
11747     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11748       return false;
11749     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11750       // Break the MUL to a SLLI and an ADD/SUB.
11751       const APInt &Imm = ConstNode->getAPIntValue();
11752       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11753           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11754         return true;
11755       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11756       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11757           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11758            (Imm - 8).isPowerOf2()))
11759         return true;
11760       // Omit the following optimization if the sub target has the M extension
11761       // and the data size >= XLen.
11762       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11763         return false;
11764       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11765       // a pair of LUI/ADDI.
11766       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11767         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11768         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11769             (1 - ImmS).isPowerOf2())
11770         return true;
11771       }
11772     }
11773   }
11774 
11775   return false;
11776 }
11777 
11778 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11779                                                       SDValue ConstNode) const {
11780   // Let the DAGCombiner decide for vectors.
11781   EVT VT = AddNode.getValueType();
11782   if (VT.isVector())
11783     return true;
11784 
11785   // Let the DAGCombiner decide for larger types.
11786   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11787     return true;
11788 
11789   // It is worse if c1 is simm12 while c1*c2 is not.
11790   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11791   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11792   const APInt &C1 = C1Node->getAPIntValue();
11793   const APInt &C2 = C2Node->getAPIntValue();
11794   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11795     return false;
11796 
11797   // Default to true and let the DAGCombiner decide.
11798   return true;
11799 }
11800 
11801 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11802     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11803     bool *Fast) const {
11804   if (!VT.isVector())
11805     return false;
11806 
11807   EVT ElemVT = VT.getVectorElementType();
11808   if (Alignment >= ElemVT.getStoreSize()) {
11809     if (Fast)
11810       *Fast = true;
11811     return true;
11812   }
11813 
11814   return false;
11815 }
11816 
11817 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11818     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11819     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11820   bool IsABIRegCopy = CC.hasValue();
11821   EVT ValueVT = Val.getValueType();
11822   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11823     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11824     // and cast to f32.
11825     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11826     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11827     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11828                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11829     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11830     Parts[0] = Val;
11831     return true;
11832   }
11833 
11834   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11835     LLVMContext &Context = *DAG.getContext();
11836     EVT ValueEltVT = ValueVT.getVectorElementType();
11837     EVT PartEltVT = PartVT.getVectorElementType();
11838     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11839     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11840     if (PartVTBitSize % ValueVTBitSize == 0) {
11841       assert(PartVTBitSize >= ValueVTBitSize);
11842       // If the element types are different, bitcast to the same element type of
11843       // PartVT first.
11844       // Give an example here, we want copy a <vscale x 1 x i8> value to
11845       // <vscale x 4 x i16>.
11846       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11847       // subvector, then we can bitcast to <vscale x 4 x i16>.
11848       if (ValueEltVT != PartEltVT) {
11849         if (PartVTBitSize > ValueVTBitSize) {
11850           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11851           assert(Count != 0 && "The number of element should not be zero.");
11852           EVT SameEltTypeVT =
11853               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11854           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11855                             DAG.getUNDEF(SameEltTypeVT), Val,
11856                             DAG.getVectorIdxConstant(0, DL));
11857         }
11858         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11859       } else {
11860         Val =
11861             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11862                         Val, DAG.getVectorIdxConstant(0, DL));
11863       }
11864       Parts[0] = Val;
11865       return true;
11866     }
11867   }
11868   return false;
11869 }
11870 
11871 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11872     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11873     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11874   bool IsABIRegCopy = CC.hasValue();
11875   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11876     SDValue Val = Parts[0];
11877 
11878     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11879     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11880     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11881     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11882     return Val;
11883   }
11884 
11885   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11886     LLVMContext &Context = *DAG.getContext();
11887     SDValue Val = Parts[0];
11888     EVT ValueEltVT = ValueVT.getVectorElementType();
11889     EVT PartEltVT = PartVT.getVectorElementType();
11890     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11891     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11892     if (PartVTBitSize % ValueVTBitSize == 0) {
11893       assert(PartVTBitSize >= ValueVTBitSize);
11894       EVT SameEltTypeVT = ValueVT;
11895       // If the element types are different, convert it to the same element type
11896       // of PartVT.
11897       // Give an example here, we want copy a <vscale x 1 x i8> value from
11898       // <vscale x 4 x i16>.
11899       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11900       // then we can extract <vscale x 1 x i8>.
11901       if (ValueEltVT != PartEltVT) {
11902         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11903         assert(Count != 0 && "The number of element should not be zero.");
11904         SameEltTypeVT =
11905             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11906         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11907       }
11908       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11909                         DAG.getVectorIdxConstant(0, DL));
11910       return Val;
11911     }
11912   }
11913   return SDValue();
11914 }
11915 
11916 SDValue
11917 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11918                                    SelectionDAG &DAG,
11919                                    SmallVectorImpl<SDNode *> &Created) const {
11920   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11921   if (isIntDivCheap(N->getValueType(0), Attr))
11922     return SDValue(N, 0); // Lower SDIV as SDIV
11923 
11924   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11925          "Unexpected divisor!");
11926 
11927   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11928   if (!Subtarget.hasStdExtZbt())
11929     return SDValue();
11930 
11931   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11932   // Besides, more critical path instructions will be generated when dividing
11933   // by 2. So we keep using the original DAGs for these cases.
11934   unsigned Lg2 = Divisor.countTrailingZeros();
11935   if (Lg2 == 1 || Lg2 >= 12)
11936     return SDValue();
11937 
11938   // fold (sdiv X, pow2)
11939   EVT VT = N->getValueType(0);
11940   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11941     return SDValue();
11942 
11943   SDLoc DL(N);
11944   SDValue N0 = N->getOperand(0);
11945   SDValue Zero = DAG.getConstant(0, DL, VT);
11946   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11947 
11948   // Add (N0 < 0) ? Pow2 - 1 : 0;
11949   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11950   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11951   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11952 
11953   Created.push_back(Cmp.getNode());
11954   Created.push_back(Add.getNode());
11955   Created.push_back(Sel.getNode());
11956 
11957   // Divide by pow2.
11958   SDValue SRA =
11959       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11960 
11961   // If we're dividing by a positive value, we're done.  Otherwise, we must
11962   // negate the result.
11963   if (Divisor.isNonNegative())
11964     return SRA;
11965 
11966   Created.push_back(SRA.getNode());
11967   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11968 }
11969 
11970 #define GET_REGISTER_MATCHER
11971 #include "RISCVGenAsmMatcher.inc"
11972 
11973 Register
11974 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11975                                        const MachineFunction &MF) const {
11976   Register Reg = MatchRegisterAltName(RegName);
11977   if (Reg == RISCV::NoRegister)
11978     Reg = MatchRegisterName(RegName);
11979   if (Reg == RISCV::NoRegister)
11980     report_fatal_error(
11981         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11982   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11983   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11984     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11985                              StringRef(RegName) + "\"."));
11986   return Reg;
11987 }
11988 
11989 namespace llvm {
11990 namespace RISCVVIntrinsicsTable {
11991 
11992 #define GET_RISCVVIntrinsicsTable_IMPL
11993 #include "RISCVGenSearchableTables.inc"
11994 
11995 } // namespace RISCVVIntrinsicsTable
11996 
11997 } // namespace llvm
11998