1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
174                    MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
188 
189   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
190   if (!Subtarget.hasStdExtZbb())
191     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
192 
193   if (Subtarget.is64Bit()) {
194     setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
195                        MVT::i32, Custom);
196 
197     setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
198                        MVT::i32, Custom);
199   } else {
200     setLibcallName(
201         {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
202         nullptr);
203     setLibcallName(RTLIB::MULO_I64, nullptr);
204   }
205 
206   if (!Subtarget.hasStdExtM()) {
207     setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
208                         ISD::SREM, ISD::UREM},
209                        XLenVT, Expand);
210   } else {
211     if (Subtarget.is64Bit()) {
212       setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);
213 
214       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
215                          {MVT::i8, MVT::i16, MVT::i32}, Custom);
216     } else {
217       setOperationAction(ISD::MUL, MVT::i64, Custom);
218     }
219   }
220 
221   setOperationAction(
222       {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
223       Expand);
224 
225   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
226                      Custom);
227 
228   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
229       Subtarget.hasStdExtZbkb()) {
230     if (Subtarget.is64Bit())
231       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
232   } else {
233     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
234   }
235 
236   if (Subtarget.hasStdExtZbp()) {
237     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
238     // more combining.
239     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);
240 
241     // BSWAP i8 doesn't exist.
242     setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
243 
244     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);
245 
246     if (Subtarget.is64Bit())
247       setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
248   } else {
249     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
250     // pattern match it directly in isel.
251     setOperationAction(ISD::BSWAP, XLenVT,
252                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
253                            ? Legal
254                            : Expand);
255     // Zbkb can use rev8+brev8 to implement bitreverse.
256     setOperationAction(ISD::BITREVERSE, XLenVT,
257                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
258   }
259 
260   if (Subtarget.hasStdExtZbb()) {
261     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
262                        Legal);
263 
264     if (Subtarget.is64Bit())
265       setOperationAction(
266           {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
267           MVT::i32, Custom);
268   } else {
269     setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);
270 
271     if (Subtarget.is64Bit())
272       setOperationAction(ISD::ABS, MVT::i32, Custom);
273   }
274 
275   if (Subtarget.hasStdExtZbt()) {
276     setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
277     setOperationAction(ISD::SELECT, XLenVT, Legal);
278 
279     if (Subtarget.is64Bit())
280       setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
281   } else {
282     setOperationAction(ISD::SELECT, XLenVT, Custom);
283   }
284 
285   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
286       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
287       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
288       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
289       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
290       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
291       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
292 
293   static const ISD::CondCode FPCCToExpand[] = {
294       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
295       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
296       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
297 
298   static const ISD::NodeType FPOpToExpand[] = {
299       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
300       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
301 
302   if (Subtarget.hasStdExtZfh())
303     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
304 
305   if (Subtarget.hasStdExtZfh()) {
306     for (auto NT : FPLegalNodeTypes)
307       setOperationAction(NT, MVT::f16, Legal);
308     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
309     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
310     for (auto CC : FPCCToExpand)
311       setCondCodeAction(CC, MVT::f16, Expand);
312     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
313     setOperationAction(ISD::SELECT, MVT::f16, Custom);
314     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
315 
316     setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
317                         ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
318                         ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
319                         ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
320                         ISD::FLOG2, ISD::FLOG10},
321                        MVT::f16, Promote);
322 
323     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
324     // complete support for all operations in LegalizeDAG.
325 
326     // We need to custom promote this.
327     if (Subtarget.is64Bit())
328       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
329   }
330 
331   if (Subtarget.hasStdExtF()) {
332     for (auto NT : FPLegalNodeTypes)
333       setOperationAction(NT, MVT::f32, Legal);
334     for (auto CC : FPCCToExpand)
335       setCondCodeAction(CC, MVT::f32, Expand);
336     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
337     setOperationAction(ISD::SELECT, MVT::f32, Custom);
338     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
339     for (auto Op : FPOpToExpand)
340       setOperationAction(Op, MVT::f32, Expand);
341     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
342     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
343   }
344 
345   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
346     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
347 
348   if (Subtarget.hasStdExtD()) {
349     for (auto NT : FPLegalNodeTypes)
350       setOperationAction(NT, MVT::f64, Legal);
351     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
352     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
353     for (auto CC : FPCCToExpand)
354       setCondCodeAction(CC, MVT::f64, Expand);
355     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
356     setOperationAction(ISD::SELECT, MVT::f64, Custom);
357     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
358     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
359     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
360     for (auto Op : FPOpToExpand)
361       setOperationAction(Op, MVT::f64, Expand);
362     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
363     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
364   }
365 
366   if (Subtarget.is64Bit())
367     setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
368                         ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
369                        MVT::i32, Custom);
370 
371   if (Subtarget.hasStdExtF()) {
372     setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
373                        Custom);
374 
375     setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
376                         ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
377                        XLenVT, Legal);
378 
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
384                       ISD::JumpTable},
385                      XLenVT, Custom);
386 
387   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
388 
389   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
390   // Unfortunately this can't be determined just from the ISA naming string.
391   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
392                      Subtarget.is64Bit() ? Legal : Custom);
393 
394   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
395   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
396   if (Subtarget.is64Bit())
397     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
398 
399   if (Subtarget.hasStdExtA()) {
400     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
401     setMinCmpXchgSizeInBits(32);
402   } else {
403     setMaxAtomicSizeInBitsSupported(0);
404   }
405 
406   setBooleanContents(ZeroOrOneBooleanContent);
407 
408   if (Subtarget.hasVInstructions()) {
409     setBooleanVectorContents(ZeroOrOneBooleanContent);
410 
411     setOperationAction(ISD::VSCALE, XLenVT, Custom);
412 
413     // RVV intrinsics may have illegal operands.
414     // We also need to custom legalize vmv.x.s.
415     setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
416                        {MVT::i8, MVT::i16}, Custom);
417     if (Subtarget.is64Bit())
418       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
419     else
420       setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
421                          MVT::i64, Custom);
422 
423     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
424                        MVT::Other, Custom);
425 
426     static const unsigned IntegerVPOps[] = {
427         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
428         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
429         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
430         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
431         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
432         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
433         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
434         ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
435         ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
436         ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};
437 
438     static const unsigned FloatingPointVPOps[] = {
439         ISD::VP_FADD,        ISD::VP_FSUB,
440         ISD::VP_FMUL,        ISD::VP_FDIV,
441         ISD::VP_FNEG,        ISD::VP_FMA,
442         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
443         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
444         ISD::VP_MERGE,       ISD::VP_SELECT,
445         ISD::VP_SITOFP,      ISD::VP_UITOFP,
446         ISD::VP_SETCC,       ISD::VP_FP_ROUND,
447         ISD::VP_FP_EXTEND};
448 
449     if (!Subtarget.is64Bit()) {
450       // We must custom-lower certain vXi64 operations on RV32 due to the vector
451       // element type being illegal.
452       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
453                          MVT::i64, Custom);
454 
455       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
456                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
457                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
458                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
459                          MVT::i64, Custom);
460 
461       setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
462                           ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
463                           ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
464                           ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
465                          MVT::i64, Custom);
466     }
467 
468     for (MVT VT : BoolVecVTs) {
469       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
470 
471       // Mask VTs are custom-expanded into a series of standard nodes
472       setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
473                           ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
474                          VT, Custom);
475 
476       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
477                          Custom);
478 
479       setOperationAction(ISD::SELECT, VT, Custom);
480       setOperationAction(
481           {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
482           Expand);
483 
484       setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
485 
486       setOperationAction(
487           {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
488           Custom);
489 
490       setOperationAction(
491           {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
492           Custom);
493 
494       // RVV has native int->float & float->int conversions where the
495       // element type sizes are within one power-of-two of each other. Any
496       // wider distances between type sizes have to be lowered as sequences
497       // which progressively narrow the gap in stages.
498       setOperationAction(
499           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
500           VT, Custom);
501 
502       // Expand all extending loads to types larger than this, and truncating
503       // stores from types larger than this.
504       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
505         setTruncStoreAction(OtherVT, VT, Expand);
506         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
507                          VT, Expand);
508       }
509 
510       setOperationAction(
511           {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
512           Custom);
513     }
514 
515     for (MVT VT : IntVecVTs) {
516       if (VT.getVectorElementType() == MVT::i64 &&
517           !Subtarget.hasVInstructionsI64())
518         continue;
519 
520       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
521       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
522 
523       // Vectors implement MULHS/MULHU.
524       setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
525 
526       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
527       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
528         setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
529 
530       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
531                          Legal);
532 
533       setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
534 
535       setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
536                          Expand);
537 
538       setOperationAction(ISD::BSWAP, VT, Expand);
539 
540       // Custom-lower extensions and truncations from/to mask types.
541       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
542                          VT, Custom);
543 
544       // RVV has native int->float & float->int conversions where the
545       // element type sizes are within one power-of-two of each other. Any
546       // wider distances between type sizes have to be lowered as sequences
547       // which progressively narrow the gap in stages.
548       setOperationAction(
549           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
550           VT, Custom);
551 
552       setOperationAction(
553           {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
554 
555       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
556       // nodes which truncate by one power of two at a time.
557       setOperationAction(ISD::TRUNCATE, VT, Custom);
558 
559       // Custom-lower insert/extract operations to simplify patterns.
560       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
561                          Custom);
562 
563       // Custom-lower reduction operations to set up the corresponding custom
564       // nodes' operands.
565       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
566                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
567                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
568                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
569                          VT, Custom);
570 
571       for (unsigned VPOpc : IntegerVPOps)
572         setOperationAction(VPOpc, VT, Custom);
573 
574       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
575 
576       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
577                          VT, Custom);
578 
579       setOperationAction(
580           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
581           Custom);
582 
583       setOperationAction(
584           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
585           VT, Custom);
586 
587       setOperationAction(ISD::SELECT, VT, Custom);
588       setOperationAction(ISD::SELECT_CC, VT, Expand);
589 
590       setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
591 
592       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
593         setTruncStoreAction(VT, OtherVT, Expand);
594         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
595                          VT, Expand);
596       }
597 
598       // Splice
599       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
600 
601       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
602       // type that can represent the value exactly.
603       if (VT.getVectorElementType() != MVT::i64) {
604         MVT FloatEltVT =
605             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
606         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
607         if (isTypeLegal(FloatVT)) {
608           setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
609                              Custom);
610         }
611       }
612     }
613 
614     // Expand various CCs to best match the RVV ISA, which natively supports UNE
615     // but no other unordered comparisons, and supports all ordered comparisons
616     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
617     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
618     // and we pattern-match those back to the "original", swapping operands once
619     // more. This way we catch both operations and both "vf" and "fv" forms with
620     // fewer patterns.
621     static const ISD::CondCode VFPCCToExpand[] = {
622         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
623         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
624         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
625     };
626 
627     // Sets common operation actions on RVV floating-point vector types.
628     const auto SetCommonVFPActions = [&](MVT VT) {
629       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
630       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
631       // sizes are within one power-of-two of each other. Therefore conversions
632       // between vXf16 and vXf64 must be lowered as sequences which convert via
633       // vXf32.
634       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
635       // Custom-lower insert/extract operations to simplify patterns.
636       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
637                          Custom);
638       // Expand various condition codes (explained above).
639       for (auto CC : VFPCCToExpand)
640         setCondCodeAction(CC, VT, Expand);
641 
642       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
643 
644       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
645                          VT, Custom);
646 
647       setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
648                           ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
649                          VT, Custom);
650 
651       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
652 
653       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
654 
655       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
656                          VT, Custom);
657 
658       setOperationAction(
659           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
660           Custom);
661 
662       setOperationAction(ISD::SELECT, VT, Custom);
663       setOperationAction(ISD::SELECT_CC, VT, Expand);
664 
665       setOperationAction(
666           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
667           VT, Custom);
668 
669       setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
670 
671       for (unsigned VPOpc : FloatingPointVPOps)
672         setOperationAction(VPOpc, VT, Custom);
673     };
674 
675     // Sets common extload/truncstore actions on RVV floating-point vector
676     // types.
677     const auto SetCommonVFPExtLoadTruncStoreActions =
678         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
679           for (auto SmallVT : SmallerVTs) {
680             setTruncStoreAction(VT, SmallVT, Expand);
681             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
682           }
683         };
684 
685     if (Subtarget.hasVInstructionsF16())
686       for (MVT VT : F16VecVTs)
687         SetCommonVFPActions(VT);
688 
689     for (MVT VT : F32VecVTs) {
690       if (Subtarget.hasVInstructionsF32())
691         SetCommonVFPActions(VT);
692       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
693     }
694 
695     for (MVT VT : F64VecVTs) {
696       if (Subtarget.hasVInstructionsF64())
697         SetCommonVFPActions(VT);
698       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
699       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
700     }
701 
702     if (Subtarget.useRVVForFixedLengthVectors()) {
703       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
704         if (!useRVVForFixedLengthVectorVT(VT))
705           continue;
706 
707         // By default everything must be expanded.
708         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
709           setOperationAction(Op, VT, Expand);
710         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
711           setTruncStoreAction(VT, OtherVT, Expand);
712           setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
713                            OtherVT, VT, Expand);
714         }
715 
716         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
717         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
718                            Custom);
719 
720         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
721                            Custom);
722 
723         setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
724                            VT, Custom);
725 
726         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
727 
728         setOperationAction(ISD::SETCC, VT, Custom);
729 
730         setOperationAction(ISD::SELECT, VT, Custom);
731 
732         setOperationAction(ISD::TRUNCATE, VT, Custom);
733 
734         setOperationAction(ISD::BITCAST, VT, Custom);
735 
736         setOperationAction(
737             {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
738             Custom);
739 
740         setOperationAction(
741             {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
742             Custom);
743 
744         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
745                             ISD::FP_TO_UINT},
746                            VT, Custom);
747 
748         // Operations below are different for between masks and other vectors.
749         if (VT.getVectorElementType() == MVT::i1) {
750           setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
751                               ISD::OR, ISD::XOR},
752                              VT, Custom);
753 
754           setOperationAction(
755               {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
756               VT, Custom);
757           continue;
758         }
759 
760         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
761         // it before type legalization for i64 vectors on RV32. It will then be
762         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
763         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
764         // improvements first.
765         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
766           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
767           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
768         }
769 
770         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
771         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
772 
773         setOperationAction(
774             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
775 
776         setOperationAction(
777             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
778             Custom);
779 
780         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
781                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
782                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
783                            VT, Custom);
784 
785         setOperationAction(
786             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
787 
788         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
789         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
790           setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
791 
792         setOperationAction(
793             {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
794             Custom);
795 
796         setOperationAction(ISD::VSELECT, VT, Custom);
797         setOperationAction(ISD::SELECT_CC, VT, Expand);
798 
799         setOperationAction(
800             {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
801 
802         // Custom-lower reduction operations to set up the corresponding custom
803         // nodes' operands.
804         setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
805                             ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
806                             ISD::VECREDUCE_UMIN},
807                            VT, Custom);
808 
809         for (unsigned VPOpc : IntegerVPOps)
810           setOperationAction(VPOpc, VT, Custom);
811 
812         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
813         // type that can represent the value exactly.
814         if (VT.getVectorElementType() != MVT::i64) {
815           MVT FloatEltVT =
816               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
817           EVT FloatVT =
818               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
819           if (isTypeLegal(FloatVT))
820             setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
821                                Custom);
822         }
823       }
824 
825       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
826         if (!useRVVForFixedLengthVectorVT(VT))
827           continue;
828 
829         // By default everything must be expanded.
830         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
831           setOperationAction(Op, VT, Expand);
832         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
833           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
834           setTruncStoreAction(VT, OtherVT, Expand);
835         }
836 
837         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
838         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
839                            Custom);
840 
841         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
842                             ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
843                             ISD::EXTRACT_VECTOR_ELT},
844                            VT, Custom);
845 
846         setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
847                             ISD::MGATHER, ISD::MSCATTER},
848                            VT, Custom);
849 
850         setOperationAction(
851             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
852             Custom);
853 
854         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
855                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
856                             ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
857                            VT, Custom);
858 
859         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
860 
861         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
862                            VT, Custom);
863 
864         for (auto CC : VFPCCToExpand)
865           setCondCodeAction(CC, VT, Expand);
866 
867         setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
868         setOperationAction(ISD::SELECT_CC, VT, Expand);
869 
870         setOperationAction(ISD::BITCAST, VT, Custom);
871 
872         setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
873                             ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
874                            VT, Custom);
875 
876         for (unsigned VPOpc : FloatingPointVPOps)
877           setOperationAction(VPOpc, VT, Custom);
878       }
879 
880       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
881       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
882                          Custom);
883       if (Subtarget.hasStdExtZfh())
884         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
885       if (Subtarget.hasStdExtF())
886         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
887       if (Subtarget.hasStdExtD())
888         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
889     }
890   }
891 
892   // Function alignments.
893   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
894   setMinFunctionAlignment(FunctionAlignment);
895   setPrefFunctionAlignment(FunctionAlignment);
896 
897   setMinimumJumpTableEntries(5);
898 
899   // Jumps are expensive, compared to logic
900   setJumpIsExpensive();
901 
902   setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
903                        ISD::OR, ISD::XOR});
904 
905   if (Subtarget.hasStdExtF())
906     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
907 
908   if (Subtarget.hasStdExtZbp())
909     setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
910 
911   if (Subtarget.hasStdExtZbb())
912     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
913 
914   if (Subtarget.hasStdExtZbkb())
915     setTargetDAGCombine(ISD::BITREVERSE);
916   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
917     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
918   if (Subtarget.hasStdExtF())
919     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
920                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
921   if (Subtarget.hasVInstructions())
922     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
923                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
924                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
925 
926   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
927   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
928 }
929 
930 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
931                                             LLVMContext &Context,
932                                             EVT VT) const {
933   if (!VT.isVector())
934     return getPointerTy(DL);
935   if (Subtarget.hasVInstructions() &&
936       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
937     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
938   return VT.changeVectorElementTypeToInteger();
939 }
940 
941 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
942   return Subtarget.getXLenVT();
943 }
944 
945 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
946                                              const CallInst &I,
947                                              MachineFunction &MF,
948                                              unsigned Intrinsic) const {
949   auto &DL = I.getModule()->getDataLayout();
950   switch (Intrinsic) {
951   default:
952     return false;
953   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
954   case Intrinsic::riscv_masked_atomicrmw_add_i32:
955   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
956   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
957   case Intrinsic::riscv_masked_atomicrmw_max_i32:
958   case Intrinsic::riscv_masked_atomicrmw_min_i32:
959   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
960   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
961   case Intrinsic::riscv_masked_cmpxchg_i32:
962     Info.opc = ISD::INTRINSIC_W_CHAIN;
963     Info.memVT = MVT::i32;
964     Info.ptrVal = I.getArgOperand(0);
965     Info.offset = 0;
966     Info.align = Align(4);
967     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
968                  MachineMemOperand::MOVolatile;
969     return true;
970   case Intrinsic::riscv_masked_strided_load:
971     Info.opc = ISD::INTRINSIC_W_CHAIN;
972     Info.ptrVal = I.getArgOperand(1);
973     Info.memVT = getValueType(DL, I.getType()->getScalarType());
974     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
975     Info.size = MemoryLocation::UnknownSize;
976     Info.flags |= MachineMemOperand::MOLoad;
977     return true;
978   case Intrinsic::riscv_masked_strided_store:
979     Info.opc = ISD::INTRINSIC_VOID;
980     Info.ptrVal = I.getArgOperand(1);
981     Info.memVT =
982         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
983     Info.align = Align(
984         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
985         8);
986     Info.size = MemoryLocation::UnknownSize;
987     Info.flags |= MachineMemOperand::MOStore;
988     return true;
989   case Intrinsic::riscv_seg2_load:
990   case Intrinsic::riscv_seg3_load:
991   case Intrinsic::riscv_seg4_load:
992   case Intrinsic::riscv_seg5_load:
993   case Intrinsic::riscv_seg6_load:
994   case Intrinsic::riscv_seg7_load:
995   case Intrinsic::riscv_seg8_load:
996     Info.opc = ISD::INTRINSIC_W_CHAIN;
997     Info.ptrVal = I.getArgOperand(0);
998     Info.memVT =
999         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
1000     Info.align =
1001         Align(DL.getTypeSizeInBits(
1002                   I.getType()->getStructElementType(0)->getScalarType()) /
1003               8);
1004     Info.size = MemoryLocation::UnknownSize;
1005     Info.flags |= MachineMemOperand::MOLoad;
1006     return true;
1007   }
1008 }
1009 
1010 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1011                                                 const AddrMode &AM, Type *Ty,
1012                                                 unsigned AS,
1013                                                 Instruction *I) const {
1014   // No global is ever allowed as a base.
1015   if (AM.BaseGV)
1016     return false;
1017 
1018   // RVV instructions only support register addressing.
1019   if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
1020     return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
1021 
1022   // Require a 12-bit signed offset.
1023   if (!isInt<12>(AM.BaseOffs))
1024     return false;
1025 
1026   switch (AM.Scale) {
1027   case 0: // "r+i" or just "i", depending on HasBaseReg.
1028     break;
1029   case 1:
1030     if (!AM.HasBaseReg) // allow "r+i".
1031       break;
1032     return false; // disallow "r+r" or "r+r+i".
1033   default:
1034     return false;
1035   }
1036 
1037   return true;
1038 }
1039 
1040 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1041   return isInt<12>(Imm);
1042 }
1043 
1044 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1045   return isInt<12>(Imm);
1046 }
1047 
1048 // On RV32, 64-bit integers are split into their high and low parts and held
1049 // in two different registers, so the trunc is free since the low register can
1050 // just be used.
1051 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1052   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1053     return false;
1054   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1055   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1056   return (SrcBits == 64 && DestBits == 32);
1057 }
1058 
1059 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1060   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1061       !SrcVT.isInteger() || !DstVT.isInteger())
1062     return false;
1063   unsigned SrcBits = SrcVT.getSizeInBits();
1064   unsigned DestBits = DstVT.getSizeInBits();
1065   return (SrcBits == 64 && DestBits == 32);
1066 }
1067 
1068 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1069   // Zexts are free if they can be combined with a load.
1070   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1071   // poorly with type legalization of compares preferring sext.
1072   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1073     EVT MemVT = LD->getMemoryVT();
1074     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1075         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1076          LD->getExtensionType() == ISD::ZEXTLOAD))
1077       return true;
1078   }
1079 
1080   return TargetLowering::isZExtFree(Val, VT2);
1081 }
1082 
1083 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1084   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1085 }
1086 
1087 bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
1088   return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
1089 }
1090 
1091 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1092   return Subtarget.hasStdExtZbb();
1093 }
1094 
1095 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1096   return Subtarget.hasStdExtZbb();
1097 }
1098 
1099 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1100   EVT VT = Y.getValueType();
1101 
1102   // FIXME: Support vectors once we have tests.
1103   if (VT.isVector())
1104     return false;
1105 
1106   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1107           Subtarget.hasStdExtZbkb()) &&
1108          !isa<ConstantSDNode>(Y);
1109 }
1110 
1111 bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
1112   // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
1113   auto *C = dyn_cast<ConstantSDNode>(Y);
1114   return C && C->getAPIntValue().ule(10);
1115 }
1116 
1117 bool RISCVTargetLowering::
1118     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
1119         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
1120         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
1121         SelectionDAG &DAG) const {
1122   // One interesting pattern that we'd want to form is 'bit extract':
1123   //   ((1 >> Y) & 1) ==/!= 0
1124   // But we also need to be careful not to try to reverse that fold.
1125 
1126   // Is this '((1 >> Y) & 1)'?
1127   if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
1128     return false; // Keep the 'bit extract' pattern.
1129 
1130   // Will this be '((1 >> Y) & 1)' after the transform?
1131   if (NewShiftOpcode == ISD::SRL && CC->isOne())
1132     return true; // Do form the 'bit extract' pattern.
1133 
1134   // If 'X' is a constant, and we transform, then we will immediately
1135   // try to undo the fold, thus causing endless combine loop.
1136   // So only do the transform if X is not a constant. This matches the default
1137   // implementation of this function.
1138   return !XC;
1139 }
1140 
1141 /// Check if sinking \p I's operands to I's basic block is profitable, because
1142 /// the operands can be folded into a target instruction, e.g.
1143 /// splats of scalars can fold into vector instructions.
1144 bool RISCVTargetLowering::shouldSinkOperands(
1145     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1146   using namespace llvm::PatternMatch;
1147 
1148   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1149     return false;
1150 
1151   auto IsSinker = [&](Instruction *I, int Operand) {
1152     switch (I->getOpcode()) {
1153     case Instruction::Add:
1154     case Instruction::Sub:
1155     case Instruction::Mul:
1156     case Instruction::And:
1157     case Instruction::Or:
1158     case Instruction::Xor:
1159     case Instruction::FAdd:
1160     case Instruction::FSub:
1161     case Instruction::FMul:
1162     case Instruction::FDiv:
1163     case Instruction::ICmp:
1164     case Instruction::FCmp:
1165       return true;
1166     case Instruction::Shl:
1167     case Instruction::LShr:
1168     case Instruction::AShr:
1169     case Instruction::UDiv:
1170     case Instruction::SDiv:
1171     case Instruction::URem:
1172     case Instruction::SRem:
1173       return Operand == 1;
1174     case Instruction::Call:
1175       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1176         switch (II->getIntrinsicID()) {
1177         case Intrinsic::fma:
1178         case Intrinsic::vp_fma:
1179           return Operand == 0 || Operand == 1;
1180         // FIXME: Our patterns can only match vx/vf instructions when the splat
1181         // it on the RHS, because TableGen doesn't recognize our VP operations
1182         // as commutative.
1183         case Intrinsic::vp_add:
1184         case Intrinsic::vp_mul:
1185         case Intrinsic::vp_and:
1186         case Intrinsic::vp_or:
1187         case Intrinsic::vp_xor:
1188         case Intrinsic::vp_fadd:
1189         case Intrinsic::vp_fmul:
1190         case Intrinsic::vp_shl:
1191         case Intrinsic::vp_lshr:
1192         case Intrinsic::vp_ashr:
1193         case Intrinsic::vp_udiv:
1194         case Intrinsic::vp_sdiv:
1195         case Intrinsic::vp_urem:
1196         case Intrinsic::vp_srem:
1197           return Operand == 1;
1198         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1199         // explicit patterns for both LHS and RHS (as 'vr' versions).
1200         case Intrinsic::vp_sub:
1201         case Intrinsic::vp_fsub:
1202         case Intrinsic::vp_fdiv:
1203           return Operand == 0 || Operand == 1;
1204         default:
1205           return false;
1206         }
1207       }
1208       return false;
1209     default:
1210       return false;
1211     }
1212   };
1213 
1214   for (auto OpIdx : enumerate(I->operands())) {
1215     if (!IsSinker(I, OpIdx.index()))
1216       continue;
1217 
1218     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1219     // Make sure we are not already sinking this operand
1220     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1221       continue;
1222 
1223     // We are looking for a splat that can be sunk.
1224     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1225                              m_Undef(), m_ZeroMask())))
1226       continue;
1227 
1228     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1229     // and vector registers
1230     for (Use &U : Op->uses()) {
1231       Instruction *Insn = cast<Instruction>(U.getUser());
1232       if (!IsSinker(Insn, U.getOperandNo()))
1233         return false;
1234     }
1235 
1236     Ops.push_back(&Op->getOperandUse(0));
1237     Ops.push_back(&OpIdx.value());
1238   }
1239   return true;
1240 }
1241 
1242 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1243                                        bool ForCodeSize) const {
1244   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1245   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1246     return false;
1247   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1248     return false;
1249   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1250     return false;
1251   return Imm.isZero();
1252 }
1253 
1254 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1255   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1256          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1257          (VT == MVT::f64 && Subtarget.hasStdExtD());
1258 }
1259 
1260 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1261                                                       CallingConv::ID CC,
1262                                                       EVT VT) const {
1263   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1264   // We might still end up using a GPR but that will be decided based on ABI.
1265   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1266   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1267     return MVT::f32;
1268 
1269   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1270 }
1271 
1272 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1273                                                            CallingConv::ID CC,
1274                                                            EVT VT) const {
1275   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1276   // We might still end up using a GPR but that will be decided based on ABI.
1277   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1278   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1279     return 1;
1280 
1281   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1282 }
1283 
1284 // Changes the condition code and swaps operands if necessary, so the SetCC
1285 // operation matches one of the comparisons supported directly by branches
1286 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1287 // with 1/-1.
1288 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1289                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1290   // Convert X > -1 to X >= 0.
1291   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1292     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1293     CC = ISD::SETGE;
1294     return;
1295   }
1296   // Convert X < 1 to 0 >= X.
1297   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1298     RHS = LHS;
1299     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1300     CC = ISD::SETGE;
1301     return;
1302   }
1303 
1304   switch (CC) {
1305   default:
1306     break;
1307   case ISD::SETGT:
1308   case ISD::SETLE:
1309   case ISD::SETUGT:
1310   case ISD::SETULE:
1311     CC = ISD::getSetCCSwappedOperands(CC);
1312     std::swap(LHS, RHS);
1313     break;
1314   }
1315 }
1316 
1317 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1318   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1319   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1320   if (VT.getVectorElementType() == MVT::i1)
1321     KnownSize *= 8;
1322 
1323   switch (KnownSize) {
1324   default:
1325     llvm_unreachable("Invalid LMUL.");
1326   case 8:
1327     return RISCVII::VLMUL::LMUL_F8;
1328   case 16:
1329     return RISCVII::VLMUL::LMUL_F4;
1330   case 32:
1331     return RISCVII::VLMUL::LMUL_F2;
1332   case 64:
1333     return RISCVII::VLMUL::LMUL_1;
1334   case 128:
1335     return RISCVII::VLMUL::LMUL_2;
1336   case 256:
1337     return RISCVII::VLMUL::LMUL_4;
1338   case 512:
1339     return RISCVII::VLMUL::LMUL_8;
1340   }
1341 }
1342 
1343 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1344   switch (LMul) {
1345   default:
1346     llvm_unreachable("Invalid LMUL.");
1347   case RISCVII::VLMUL::LMUL_F8:
1348   case RISCVII::VLMUL::LMUL_F4:
1349   case RISCVII::VLMUL::LMUL_F2:
1350   case RISCVII::VLMUL::LMUL_1:
1351     return RISCV::VRRegClassID;
1352   case RISCVII::VLMUL::LMUL_2:
1353     return RISCV::VRM2RegClassID;
1354   case RISCVII::VLMUL::LMUL_4:
1355     return RISCV::VRM4RegClassID;
1356   case RISCVII::VLMUL::LMUL_8:
1357     return RISCV::VRM8RegClassID;
1358   }
1359 }
1360 
1361 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1362   RISCVII::VLMUL LMUL = getLMUL(VT);
1363   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1364       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1365       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1366       LMUL == RISCVII::VLMUL::LMUL_1) {
1367     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1368                   "Unexpected subreg numbering");
1369     return RISCV::sub_vrm1_0 + Index;
1370   }
1371   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1372     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1373                   "Unexpected subreg numbering");
1374     return RISCV::sub_vrm2_0 + Index;
1375   }
1376   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1377     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1378                   "Unexpected subreg numbering");
1379     return RISCV::sub_vrm4_0 + Index;
1380   }
1381   llvm_unreachable("Invalid vector type.");
1382 }
1383 
1384 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1385   if (VT.getVectorElementType() == MVT::i1)
1386     return RISCV::VRRegClassID;
1387   return getRegClassIDForLMUL(getLMUL(VT));
1388 }
1389 
1390 // Attempt to decompose a subvector insert/extract between VecVT and
1391 // SubVecVT via subregister indices. Returns the subregister index that
1392 // can perform the subvector insert/extract with the given element index, as
1393 // well as the index corresponding to any leftover subvectors that must be
1394 // further inserted/extracted within the register class for SubVecVT.
1395 std::pair<unsigned, unsigned>
1396 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1397     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1398     const RISCVRegisterInfo *TRI) {
1399   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1400                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1401                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1402                 "Register classes not ordered");
1403   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1404   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1405   // Try to compose a subregister index that takes us from the incoming
1406   // LMUL>1 register class down to the outgoing one. At each step we half
1407   // the LMUL:
1408   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1409   // Note that this is not guaranteed to find a subregister index, such as
1410   // when we are extracting from one VR type to another.
1411   unsigned SubRegIdx = RISCV::NoSubRegister;
1412   for (const unsigned RCID :
1413        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1414     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1415       VecVT = VecVT.getHalfNumVectorElementsVT();
1416       bool IsHi =
1417           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1418       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1419                                             getSubregIndexByMVT(VecVT, IsHi));
1420       if (IsHi)
1421         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1422     }
1423   return {SubRegIdx, InsertExtractIdx};
1424 }
1425 
1426 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1427 // stores for those types.
1428 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1429   return !Subtarget.useRVVForFixedLengthVectors() ||
1430          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1431 }
1432 
1433 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1434   if (ScalarTy->isPointerTy())
1435     return true;
1436 
1437   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1438       ScalarTy->isIntegerTy(32))
1439     return true;
1440 
1441   if (ScalarTy->isIntegerTy(64))
1442     return Subtarget.hasVInstructionsI64();
1443 
1444   if (ScalarTy->isHalfTy())
1445     return Subtarget.hasVInstructionsF16();
1446   if (ScalarTy->isFloatTy())
1447     return Subtarget.hasVInstructionsF32();
1448   if (ScalarTy->isDoubleTy())
1449     return Subtarget.hasVInstructionsF64();
1450 
1451   return false;
1452 }
1453 
1454 static SDValue getVLOperand(SDValue Op) {
1455   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1456           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1457          "Unexpected opcode");
1458   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1459   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1460   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1461       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1462   if (!II)
1463     return SDValue();
1464   return Op.getOperand(II->VLOperand + 1 + HasChain);
1465 }
1466 
1467 static bool useRVVForFixedLengthVectorVT(MVT VT,
1468                                          const RISCVSubtarget &Subtarget) {
1469   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1470   if (!Subtarget.useRVVForFixedLengthVectors())
1471     return false;
1472 
1473   // We only support a set of vector types with a consistent maximum fixed size
1474   // across all supported vector element types to avoid legalization issues.
1475   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1476   // fixed-length vector type we support is 1024 bytes.
1477   if (VT.getFixedSizeInBits() > 1024 * 8)
1478     return false;
1479 
1480   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1481 
1482   MVT EltVT = VT.getVectorElementType();
1483 
1484   // Don't use RVV for vectors we cannot scalarize if required.
1485   switch (EltVT.SimpleTy) {
1486   // i1 is supported but has different rules.
1487   default:
1488     return false;
1489   case MVT::i1:
1490     // Masks can only use a single register.
1491     if (VT.getVectorNumElements() > MinVLen)
1492       return false;
1493     MinVLen /= 8;
1494     break;
1495   case MVT::i8:
1496   case MVT::i16:
1497   case MVT::i32:
1498     break;
1499   case MVT::i64:
1500     if (!Subtarget.hasVInstructionsI64())
1501       return false;
1502     break;
1503   case MVT::f16:
1504     if (!Subtarget.hasVInstructionsF16())
1505       return false;
1506     break;
1507   case MVT::f32:
1508     if (!Subtarget.hasVInstructionsF32())
1509       return false;
1510     break;
1511   case MVT::f64:
1512     if (!Subtarget.hasVInstructionsF64())
1513       return false;
1514     break;
1515   }
1516 
1517   // Reject elements larger than ELEN.
1518   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1519     return false;
1520 
1521   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1522   // Don't use RVV for types that don't fit.
1523   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1524     return false;
1525 
1526   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1527   // the base fixed length RVV support in place.
1528   if (!VT.isPow2VectorType())
1529     return false;
1530 
1531   return true;
1532 }
1533 
1534 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1535   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1536 }
1537 
1538 // Return the largest legal scalable vector type that matches VT's element type.
1539 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1540                                             const RISCVSubtarget &Subtarget) {
1541   // This may be called before legal types are setup.
1542   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1543           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1544          "Expected legal fixed length vector!");
1545 
1546   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1547   unsigned MaxELen = Subtarget.getELEN();
1548 
1549   MVT EltVT = VT.getVectorElementType();
1550   switch (EltVT.SimpleTy) {
1551   default:
1552     llvm_unreachable("unexpected element type for RVV container");
1553   case MVT::i1:
1554   case MVT::i8:
1555   case MVT::i16:
1556   case MVT::i32:
1557   case MVT::i64:
1558   case MVT::f16:
1559   case MVT::f32:
1560   case MVT::f64: {
1561     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1562     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1563     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1564     unsigned NumElts =
1565         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1566     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1567     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1568     return MVT::getScalableVectorVT(EltVT, NumElts);
1569   }
1570   }
1571 }
1572 
1573 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1574                                             const RISCVSubtarget &Subtarget) {
1575   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1576                                           Subtarget);
1577 }
1578 
1579 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1580   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1581 }
1582 
1583 // Grow V to consume an entire RVV register.
1584 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1585                                        const RISCVSubtarget &Subtarget) {
1586   assert(VT.isScalableVector() &&
1587          "Expected to convert into a scalable vector!");
1588   assert(V.getValueType().isFixedLengthVector() &&
1589          "Expected a fixed length vector operand!");
1590   SDLoc DL(V);
1591   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1592   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1593 }
1594 
1595 // Shrink V so it's just big enough to maintain a VT's worth of data.
1596 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1597                                          const RISCVSubtarget &Subtarget) {
1598   assert(VT.isFixedLengthVector() &&
1599          "Expected to convert into a fixed length vector!");
1600   assert(V.getValueType().isScalableVector() &&
1601          "Expected a scalable vector operand!");
1602   SDLoc DL(V);
1603   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1604   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1605 }
1606 
1607 /// Return the type of the mask type suitable for masking the provided
1608 /// vector type.  This is simply an i1 element type vector of the same
1609 /// (possibly scalable) length.
1610 static MVT getMaskTypeFor(EVT VecVT) {
1611   assert(VecVT.isVector());
1612   ElementCount EC = VecVT.getVectorElementCount();
1613   return MVT::getVectorVT(MVT::i1, EC);
1614 }
1615 
1616 /// Creates an all ones mask suitable for masking a vector of type VecTy with
1617 /// vector length VL.  .
1618 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
1619                               SelectionDAG &DAG) {
1620   MVT MaskVT = getMaskTypeFor(VecVT);
1621   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1622 }
1623 
1624 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1625 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1626 // the vector type that it is contained in.
1627 static std::pair<SDValue, SDValue>
1628 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1629                 const RISCVSubtarget &Subtarget) {
1630   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1631   MVT XLenVT = Subtarget.getXLenVT();
1632   SDValue VL = VecVT.isFixedLengthVector()
1633                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1634                    : DAG.getRegister(RISCV::X0, XLenVT);
1635   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
1636   return {Mask, VL};
1637 }
1638 
1639 // As above but assuming the given type is a scalable vector type.
1640 static std::pair<SDValue, SDValue>
1641 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1642                         const RISCVSubtarget &Subtarget) {
1643   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1644   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1645 }
1646 
1647 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1648 // of either is (currently) supported. This can get us into an infinite loop
1649 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1650 // as a ..., etc.
1651 // Until either (or both) of these can reliably lower any node, reporting that
1652 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1653 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1654 // which is not desirable.
1655 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1656     EVT VT, unsigned DefinedValues) const {
1657   return false;
1658 }
1659 
1660 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1661                                   const RISCVSubtarget &Subtarget) {
1662   // RISCV FP-to-int conversions saturate to the destination register size, but
1663   // don't produce 0 for nan. We can use a conversion instruction and fix the
1664   // nan case with a compare and a select.
1665   SDValue Src = Op.getOperand(0);
1666 
1667   EVT DstVT = Op.getValueType();
1668   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1669 
1670   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1671   unsigned Opc;
1672   if (SatVT == DstVT)
1673     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1674   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1675     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1676   else
1677     return SDValue();
1678   // FIXME: Support other SatVTs by clamping before or after the conversion.
1679 
1680   SDLoc DL(Op);
1681   SDValue FpToInt = DAG.getNode(
1682       Opc, DL, DstVT, Src,
1683       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1684 
1685   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1686   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1687 }
1688 
1689 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1690 // and back. Taking care to avoid converting values that are nan or already
1691 // correct.
1692 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1693 // have FRM dependencies modeled yet.
1694 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1695   MVT VT = Op.getSimpleValueType();
1696   assert(VT.isVector() && "Unexpected type");
1697 
1698   SDLoc DL(Op);
1699 
1700   // Freeze the source since we are increasing the number of uses.
1701   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1702 
1703   // Truncate to integer and convert back to FP.
1704   MVT IntVT = VT.changeVectorElementTypeToInteger();
1705   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1706   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1707 
1708   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1709 
1710   if (Op.getOpcode() == ISD::FCEIL) {
1711     // If the truncated value is the greater than or equal to the original
1712     // value, we've computed the ceil. Otherwise, we went the wrong way and
1713     // need to increase by 1.
1714     // FIXME: This should use a masked operation. Handle here or in isel?
1715     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1716                                  DAG.getConstantFP(1.0, DL, VT));
1717     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1718     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1719   } else if (Op.getOpcode() == ISD::FFLOOR) {
1720     // If the truncated value is the less than or equal to the original value,
1721     // we've computed the floor. Otherwise, we went the wrong way and need to
1722     // decrease by 1.
1723     // FIXME: This should use a masked operation. Handle here or in isel?
1724     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1725                                  DAG.getConstantFP(1.0, DL, VT));
1726     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1727     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1728   }
1729 
1730   // Restore the original sign so that -0.0 is preserved.
1731   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1732 
1733   // Determine the largest integer that can be represented exactly. This and
1734   // values larger than it don't have any fractional bits so don't need to
1735   // be converted.
1736   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1737   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1738   APFloat MaxVal = APFloat(FltSem);
1739   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1740                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1741   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1742 
1743   // If abs(Src) was larger than MaxVal or nan, keep it.
1744   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1745   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1746   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1747 }
1748 
1749 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1750 // This mode isn't supported in vector hardware on RISCV. But as long as we
1751 // aren't compiling with trapping math, we can emulate this with
1752 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1753 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1754 // dependencies modeled yet.
1755 // FIXME: Use masked operations to avoid final merge.
1756 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1757   MVT VT = Op.getSimpleValueType();
1758   assert(VT.isVector() && "Unexpected type");
1759 
1760   SDLoc DL(Op);
1761 
1762   // Freeze the source since we are increasing the number of uses.
1763   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1764 
1765   // We do the conversion on the absolute value and fix the sign at the end.
1766   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1767 
1768   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1769   bool Ignored;
1770   APFloat Point5Pred = APFloat(0.5f);
1771   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1772   Point5Pred.next(/*nextDown*/ true);
1773 
1774   // Add the adjustment.
1775   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1776                                DAG.getConstantFP(Point5Pred, DL, VT));
1777 
1778   // Truncate to integer and convert back to fp.
1779   MVT IntVT = VT.changeVectorElementTypeToInteger();
1780   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1781   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1782 
1783   // Restore the original sign.
1784   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1785 
1786   // Determine the largest integer that can be represented exactly. This and
1787   // values larger than it don't have any fractional bits so don't need to
1788   // be converted.
1789   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1790   APFloat MaxVal = APFloat(FltSem);
1791   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1792                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1793   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1794 
1795   // If abs(Src) was larger than MaxVal or nan, keep it.
1796   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1797   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1798   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1799 }
1800 
1801 struct VIDSequence {
1802   int64_t StepNumerator;
1803   unsigned StepDenominator;
1804   int64_t Addend;
1805 };
1806 
1807 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1808 // to the (non-zero) step S and start value X. This can be then lowered as the
1809 // RVV sequence (VID * S) + X, for example.
1810 // The step S is represented as an integer numerator divided by a positive
1811 // denominator. Note that the implementation currently only identifies
1812 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1813 // cannot detect 2/3, for example.
1814 // Note that this method will also match potentially unappealing index
1815 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1816 // determine whether this is worth generating code for.
1817 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1818   unsigned NumElts = Op.getNumOperands();
1819   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1820   if (!Op.getValueType().isInteger())
1821     return None;
1822 
1823   Optional<unsigned> SeqStepDenom;
1824   Optional<int64_t> SeqStepNum, SeqAddend;
1825   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1826   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1827   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1828     // Assume undef elements match the sequence; we just have to be careful
1829     // when interpolating across them.
1830     if (Op.getOperand(Idx).isUndef())
1831       continue;
1832     // The BUILD_VECTOR must be all constants.
1833     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1834       return None;
1835 
1836     uint64_t Val = Op.getConstantOperandVal(Idx) &
1837                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1838 
1839     if (PrevElt) {
1840       // Calculate the step since the last non-undef element, and ensure
1841       // it's consistent across the entire sequence.
1842       unsigned IdxDiff = Idx - PrevElt->second;
1843       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1844 
1845       // A zero-value value difference means that we're somewhere in the middle
1846       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1847       // step change before evaluating the sequence.
1848       if (ValDiff == 0)
1849         continue;
1850 
1851       int64_t Remainder = ValDiff % IdxDiff;
1852       // Normalize the step if it's greater than 1.
1853       if (Remainder != ValDiff) {
1854         // The difference must cleanly divide the element span.
1855         if (Remainder != 0)
1856           return None;
1857         ValDiff /= IdxDiff;
1858         IdxDiff = 1;
1859       }
1860 
1861       if (!SeqStepNum)
1862         SeqStepNum = ValDiff;
1863       else if (ValDiff != SeqStepNum)
1864         return None;
1865 
1866       if (!SeqStepDenom)
1867         SeqStepDenom = IdxDiff;
1868       else if (IdxDiff != *SeqStepDenom)
1869         return None;
1870     }
1871 
1872     // Record this non-undef element for later.
1873     if (!PrevElt || PrevElt->first != Val)
1874       PrevElt = std::make_pair(Val, Idx);
1875   }
1876 
1877   // We need to have logged a step for this to count as a legal index sequence.
1878   if (!SeqStepNum || !SeqStepDenom)
1879     return None;
1880 
1881   // Loop back through the sequence and validate elements we might have skipped
1882   // while waiting for a valid step. While doing this, log any sequence addend.
1883   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1884     if (Op.getOperand(Idx).isUndef())
1885       continue;
1886     uint64_t Val = Op.getConstantOperandVal(Idx) &
1887                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1888     uint64_t ExpectedVal =
1889         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1890     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1891     if (!SeqAddend)
1892       SeqAddend = Addend;
1893     else if (Addend != SeqAddend)
1894       return None;
1895   }
1896 
1897   assert(SeqAddend && "Must have an addend if we have a step");
1898 
1899   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1900 }
1901 
1902 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1903 // and lower it as a VRGATHER_VX_VL from the source vector.
1904 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1905                                   SelectionDAG &DAG,
1906                                   const RISCVSubtarget &Subtarget) {
1907   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1908     return SDValue();
1909   SDValue Vec = SplatVal.getOperand(0);
1910   // Only perform this optimization on vectors of the same size for simplicity.
1911   if (Vec.getValueType() != VT)
1912     return SDValue();
1913   SDValue Idx = SplatVal.getOperand(1);
1914   // The index must be a legal type.
1915   if (Idx.getValueType() != Subtarget.getXLenVT())
1916     return SDValue();
1917 
1918   MVT ContainerVT = VT;
1919   if (VT.isFixedLengthVector()) {
1920     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1921     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
1922   }
1923 
1924   SDValue Mask, VL;
1925   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1926 
1927   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
1928                                Idx, Mask, VL);
1929 
1930   if (!VT.isFixedLengthVector())
1931     return Gather;
1932 
1933   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1934 }
1935 
1936 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1937                                  const RISCVSubtarget &Subtarget) {
1938   MVT VT = Op.getSimpleValueType();
1939   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1940 
1941   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1942 
1943   SDLoc DL(Op);
1944   SDValue Mask, VL;
1945   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1946 
1947   MVT XLenVT = Subtarget.getXLenVT();
1948   unsigned NumElts = Op.getNumOperands();
1949 
1950   if (VT.getVectorElementType() == MVT::i1) {
1951     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1952       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1953       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1954     }
1955 
1956     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1957       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1958       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1959     }
1960 
1961     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1962     // scalar integer chunks whose bit-width depends on the number of mask
1963     // bits and XLEN.
1964     // First, determine the most appropriate scalar integer type to use. This
1965     // is at most XLenVT, but may be shrunk to a smaller vector element type
1966     // according to the size of the final vector - use i8 chunks rather than
1967     // XLenVT if we're producing a v8i1. This results in more consistent
1968     // codegen across RV32 and RV64.
1969     unsigned NumViaIntegerBits =
1970         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1971     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
1972     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1973       // If we have to use more than one INSERT_VECTOR_ELT then this
1974       // optimization is likely to increase code size; avoid peforming it in
1975       // such a case. We can use a load from a constant pool in this case.
1976       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1977         return SDValue();
1978       // Now we can create our integer vector type. Note that it may be larger
1979       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1980       MVT IntegerViaVecVT =
1981           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1982                            divideCeil(NumElts, NumViaIntegerBits));
1983 
1984       uint64_t Bits = 0;
1985       unsigned BitPos = 0, IntegerEltIdx = 0;
1986       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1987 
1988       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1989         // Once we accumulate enough bits to fill our scalar type, insert into
1990         // our vector and clear our accumulated data.
1991         if (I != 0 && I % NumViaIntegerBits == 0) {
1992           if (NumViaIntegerBits <= 32)
1993             Bits = SignExtend64(Bits, 32);
1994           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1995           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1996                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1997           Bits = 0;
1998           BitPos = 0;
1999           IntegerEltIdx++;
2000         }
2001         SDValue V = Op.getOperand(I);
2002         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2003         Bits |= ((uint64_t)BitValue << BitPos);
2004       }
2005 
2006       // Insert the (remaining) scalar value into position in our integer
2007       // vector type.
2008       if (NumViaIntegerBits <= 32)
2009         Bits = SignExtend64(Bits, 32);
2010       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2011       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2012                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2013 
2014       if (NumElts < NumViaIntegerBits) {
2015         // If we're producing a smaller vector than our minimum legal integer
2016         // type, bitcast to the equivalent (known-legal) mask type, and extract
2017         // our final mask.
2018         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2019         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2020         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2021                           DAG.getConstant(0, DL, XLenVT));
2022       } else {
2023         // Else we must have produced an integer type with the same size as the
2024         // mask type; bitcast for the final result.
2025         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2026         Vec = DAG.getBitcast(VT, Vec);
2027       }
2028 
2029       return Vec;
2030     }
2031 
2032     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2033     // vector type, we have a legal equivalently-sized i8 type, so we can use
2034     // that.
2035     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2036     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2037 
2038     SDValue WideVec;
2039     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2040       // For a splat, perform a scalar truncate before creating the wider
2041       // vector.
2042       assert(Splat.getValueType() == XLenVT &&
2043              "Unexpected type for i1 splat value");
2044       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2045                           DAG.getConstant(1, DL, XLenVT));
2046       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2047     } else {
2048       SmallVector<SDValue, 8> Ops(Op->op_values());
2049       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2050       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2051       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2052     }
2053 
2054     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2055   }
2056 
2057   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2058     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2059       return Gather;
2060     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2061                                         : RISCVISD::VMV_V_X_VL;
2062     Splat =
2063         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2064     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2065   }
2066 
2067   // Try and match index sequences, which we can lower to the vid instruction
2068   // with optional modifications. An all-undef vector is matched by
2069   // getSplatValue, above.
2070   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2071     int64_t StepNumerator = SimpleVID->StepNumerator;
2072     unsigned StepDenominator = SimpleVID->StepDenominator;
2073     int64_t Addend = SimpleVID->Addend;
2074 
2075     assert(StepNumerator != 0 && "Invalid step");
2076     bool Negate = false;
2077     int64_t SplatStepVal = StepNumerator;
2078     unsigned StepOpcode = ISD::MUL;
2079     if (StepNumerator != 1) {
2080       if (isPowerOf2_64(std::abs(StepNumerator))) {
2081         Negate = StepNumerator < 0;
2082         StepOpcode = ISD::SHL;
2083         SplatStepVal = Log2_64(std::abs(StepNumerator));
2084       }
2085     }
2086 
2087     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2088     // threshold since it's the immediate value many RVV instructions accept.
2089     // There is no vmul.vi instruction so ensure multiply constant can fit in
2090     // a single addi instruction.
2091     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2092          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2093         isPowerOf2_32(StepDenominator) &&
2094         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
2095       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2096       // Convert right out of the scalable type so we can use standard ISD
2097       // nodes for the rest of the computation. If we used scalable types with
2098       // these, we'd lose the fixed-length vector info and generate worse
2099       // vsetvli code.
2100       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2101       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2102           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2103         SDValue SplatStep = DAG.getSplatBuildVector(
2104             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2105         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2106       }
2107       if (StepDenominator != 1) {
2108         SDValue SplatStep = DAG.getSplatBuildVector(
2109             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2110         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2111       }
2112       if (Addend != 0 || Negate) {
2113         SDValue SplatAddend = DAG.getSplatBuildVector(
2114             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2115         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2116       }
2117       return VID;
2118     }
2119   }
2120 
2121   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2122   // when re-interpreted as a vector with a larger element type. For example,
2123   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2124   // could be instead splat as
2125   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2126   // TODO: This optimization could also work on non-constant splats, but it
2127   // would require bit-manipulation instructions to construct the splat value.
2128   SmallVector<SDValue> Sequence;
2129   unsigned EltBitSize = VT.getScalarSizeInBits();
2130   const auto *BV = cast<BuildVectorSDNode>(Op);
2131   if (VT.isInteger() && EltBitSize < 64 &&
2132       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2133       BV->getRepeatedSequence(Sequence) &&
2134       (Sequence.size() * EltBitSize) <= 64) {
2135     unsigned SeqLen = Sequence.size();
2136     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2137     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2138     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2139             ViaIntVT == MVT::i64) &&
2140            "Unexpected sequence type");
2141 
2142     unsigned EltIdx = 0;
2143     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2144     uint64_t SplatValue = 0;
2145     // Construct the amalgamated value which can be splatted as this larger
2146     // vector type.
2147     for (const auto &SeqV : Sequence) {
2148       if (!SeqV.isUndef())
2149         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2150                        << (EltIdx * EltBitSize));
2151       EltIdx++;
2152     }
2153 
2154     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2155     // achieve better constant materializion.
2156     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2157       SplatValue = SignExtend64(SplatValue, 32);
2158 
2159     // Since we can't introduce illegal i64 types at this stage, we can only
2160     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2161     // way we can use RVV instructions to splat.
2162     assert((ViaIntVT.bitsLE(XLenVT) ||
2163             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2164            "Unexpected bitcast sequence");
2165     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2166       SDValue ViaVL =
2167           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2168       MVT ViaContainerVT =
2169           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2170       SDValue Splat =
2171           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2172                       DAG.getUNDEF(ViaContainerVT),
2173                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2174       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2175       return DAG.getBitcast(VT, Splat);
2176     }
2177   }
2178 
2179   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2180   // which constitute a large proportion of the elements. In such cases we can
2181   // splat a vector with the dominant element and make up the shortfall with
2182   // INSERT_VECTOR_ELTs.
2183   // Note that this includes vectors of 2 elements by association. The
2184   // upper-most element is the "dominant" one, allowing us to use a splat to
2185   // "insert" the upper element, and an insert of the lower element at position
2186   // 0, which improves codegen.
2187   SDValue DominantValue;
2188   unsigned MostCommonCount = 0;
2189   DenseMap<SDValue, unsigned> ValueCounts;
2190   unsigned NumUndefElts =
2191       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2192 
2193   // Track the number of scalar loads we know we'd be inserting, estimated as
2194   // any non-zero floating-point constant. Other kinds of element are either
2195   // already in registers or are materialized on demand. The threshold at which
2196   // a vector load is more desirable than several scalar materializion and
2197   // vector-insertion instructions is not known.
2198   unsigned NumScalarLoads = 0;
2199 
2200   for (SDValue V : Op->op_values()) {
2201     if (V.isUndef())
2202       continue;
2203 
2204     ValueCounts.insert(std::make_pair(V, 0));
2205     unsigned &Count = ValueCounts[V];
2206 
2207     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2208       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2209 
2210     // Is this value dominant? In case of a tie, prefer the highest element as
2211     // it's cheaper to insert near the beginning of a vector than it is at the
2212     // end.
2213     if (++Count >= MostCommonCount) {
2214       DominantValue = V;
2215       MostCommonCount = Count;
2216     }
2217   }
2218 
2219   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2220   unsigned NumDefElts = NumElts - NumUndefElts;
2221   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2222 
2223   // Don't perform this optimization when optimizing for size, since
2224   // materializing elements and inserting them tends to cause code bloat.
2225   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2226       ((MostCommonCount > DominantValueCountThreshold) ||
2227        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2228     // Start by splatting the most common element.
2229     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2230 
2231     DenseSet<SDValue> Processed{DominantValue};
2232     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2233     for (const auto &OpIdx : enumerate(Op->ops())) {
2234       const SDValue &V = OpIdx.value();
2235       if (V.isUndef() || !Processed.insert(V).second)
2236         continue;
2237       if (ValueCounts[V] == 1) {
2238         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2239                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2240       } else {
2241         // Blend in all instances of this value using a VSELECT, using a
2242         // mask where each bit signals whether that element is the one
2243         // we're after.
2244         SmallVector<SDValue> Ops;
2245         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2246           return DAG.getConstant(V == V1, DL, XLenVT);
2247         });
2248         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2249                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2250                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2251       }
2252     }
2253 
2254     return Vec;
2255   }
2256 
2257   return SDValue();
2258 }
2259 
2260 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2261                                    SDValue Lo, SDValue Hi, SDValue VL,
2262                                    SelectionDAG &DAG) {
2263   if (!Passthru)
2264     Passthru = DAG.getUNDEF(VT);
2265   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2266     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2267     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2268     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2269     // node in order to try and match RVV vector/scalar instructions.
2270     if ((LoC >> 31) == HiC)
2271       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2272 
2273     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2274     // vmv.v.x whose EEW = 32 to lower it.
2275     auto *Const = dyn_cast<ConstantSDNode>(VL);
2276     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2277       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2278       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2279       // access the subtarget here now.
2280       auto InterVec = DAG.getNode(
2281           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2282                                   DAG.getRegister(RISCV::X0, MVT::i32));
2283       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2284     }
2285   }
2286 
2287   // Fall back to a stack store and stride x0 vector load.
2288   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2289                      Hi, VL);
2290 }
2291 
2292 // Called by type legalization to handle splat of i64 on RV32.
2293 // FIXME: We can optimize this when the type has sign or zero bits in one
2294 // of the halves.
2295 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2296                                    SDValue Scalar, SDValue VL,
2297                                    SelectionDAG &DAG) {
2298   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2299   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2300                            DAG.getConstant(0, DL, MVT::i32));
2301   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2302                            DAG.getConstant(1, DL, MVT::i32));
2303   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2304 }
2305 
2306 // This function lowers a splat of a scalar operand Splat with the vector
2307 // length VL. It ensures the final sequence is type legal, which is useful when
2308 // lowering a splat after type legalization.
2309 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2310                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2311                                 const RISCVSubtarget &Subtarget) {
2312   bool HasPassthru = Passthru && !Passthru.isUndef();
2313   if (!HasPassthru && !Passthru)
2314     Passthru = DAG.getUNDEF(VT);
2315   if (VT.isFloatingPoint()) {
2316     // If VL is 1, we could use vfmv.s.f.
2317     if (isOneConstant(VL))
2318       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2319     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2320   }
2321 
2322   MVT XLenVT = Subtarget.getXLenVT();
2323 
2324   // Simplest case is that the operand needs to be promoted to XLenVT.
2325   if (Scalar.getValueType().bitsLE(XLenVT)) {
2326     // If the operand is a constant, sign extend to increase our chances
2327     // of being able to use a .vi instruction. ANY_EXTEND would become a
2328     // a zero extend and the simm5 check in isel would fail.
2329     // FIXME: Should we ignore the upper bits in isel instead?
2330     unsigned ExtOpc =
2331         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2332     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2333     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2334     // If VL is 1 and the scalar value won't benefit from immediate, we could
2335     // use vmv.s.x.
2336     if (isOneConstant(VL) &&
2337         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2338       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2339     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2340   }
2341 
2342   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2343          "Unexpected scalar for splat lowering!");
2344 
2345   if (isOneConstant(VL) && isNullConstant(Scalar))
2346     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2347                        DAG.getConstant(0, DL, XLenVT), VL);
2348 
2349   // Otherwise use the more complicated splatting algorithm.
2350   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2351 }
2352 
2353 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2354                                 const RISCVSubtarget &Subtarget) {
2355   // We need to be able to widen elements to the next larger integer type.
2356   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2357     return false;
2358 
2359   int Size = Mask.size();
2360   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2361 
2362   int Srcs[] = {-1, -1};
2363   for (int i = 0; i != Size; ++i) {
2364     // Ignore undef elements.
2365     if (Mask[i] < 0)
2366       continue;
2367 
2368     // Is this an even or odd element.
2369     int Pol = i % 2;
2370 
2371     // Ensure we consistently use the same source for this element polarity.
2372     int Src = Mask[i] / Size;
2373     if (Srcs[Pol] < 0)
2374       Srcs[Pol] = Src;
2375     if (Srcs[Pol] != Src)
2376       return false;
2377 
2378     // Make sure the element within the source is appropriate for this element
2379     // in the destination.
2380     int Elt = Mask[i] % Size;
2381     if (Elt != i / 2)
2382       return false;
2383   }
2384 
2385   // We need to find a source for each polarity and they can't be the same.
2386   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2387     return false;
2388 
2389   // Swap the sources if the second source was in the even polarity.
2390   SwapSources = Srcs[0] > Srcs[1];
2391 
2392   return true;
2393 }
2394 
2395 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2396 /// and then extract the original number of elements from the rotated result.
2397 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2398 /// returned rotation amount is for a rotate right, where elements move from
2399 /// higher elements to lower elements. \p LoSrc indicates the first source
2400 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2401 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2402 /// 0 or 1 if a rotation is found.
2403 ///
2404 /// NOTE: We talk about rotate to the right which matches how bit shift and
2405 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2406 /// and the table below write vectors with the lowest elements on the left.
2407 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2408   int Size = Mask.size();
2409 
2410   // We need to detect various ways of spelling a rotation:
2411   //   [11, 12, 13, 14, 15,  0,  1,  2]
2412   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2413   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2414   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2415   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2416   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2417   int Rotation = 0;
2418   LoSrc = -1;
2419   HiSrc = -1;
2420   for (int i = 0; i != Size; ++i) {
2421     int M = Mask[i];
2422     if (M < 0)
2423       continue;
2424 
2425     // Determine where a rotate vector would have started.
2426     int StartIdx = i - (M % Size);
2427     // The identity rotation isn't interesting, stop.
2428     if (StartIdx == 0)
2429       return -1;
2430 
2431     // If we found the tail of a vector the rotation must be the missing
2432     // front. If we found the head of a vector, it must be how much of the
2433     // head.
2434     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2435 
2436     if (Rotation == 0)
2437       Rotation = CandidateRotation;
2438     else if (Rotation != CandidateRotation)
2439       // The rotations don't match, so we can't match this mask.
2440       return -1;
2441 
2442     // Compute which value this mask is pointing at.
2443     int MaskSrc = M < Size ? 0 : 1;
2444 
2445     // Compute which of the two target values this index should be assigned to.
2446     // This reflects whether the high elements are remaining or the low elemnts
2447     // are remaining.
2448     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2449 
2450     // Either set up this value if we've not encountered it before, or check
2451     // that it remains consistent.
2452     if (TargetSrc < 0)
2453       TargetSrc = MaskSrc;
2454     else if (TargetSrc != MaskSrc)
2455       // This may be a rotation, but it pulls from the inputs in some
2456       // unsupported interleaving.
2457       return -1;
2458   }
2459 
2460   // Check that we successfully analyzed the mask, and normalize the results.
2461   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2462   assert((LoSrc >= 0 || HiSrc >= 0) &&
2463          "Failed to find a rotated input vector!");
2464 
2465   return Rotation;
2466 }
2467 
2468 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2469                                    const RISCVSubtarget &Subtarget) {
2470   SDValue V1 = Op.getOperand(0);
2471   SDValue V2 = Op.getOperand(1);
2472   SDLoc DL(Op);
2473   MVT XLenVT = Subtarget.getXLenVT();
2474   MVT VT = Op.getSimpleValueType();
2475   unsigned NumElts = VT.getVectorNumElements();
2476   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2477 
2478   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2479 
2480   SDValue TrueMask, VL;
2481   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2482 
2483   if (SVN->isSplat()) {
2484     const int Lane = SVN->getSplatIndex();
2485     if (Lane >= 0) {
2486       MVT SVT = VT.getVectorElementType();
2487 
2488       // Turn splatted vector load into a strided load with an X0 stride.
2489       SDValue V = V1;
2490       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2491       // with undef.
2492       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2493       int Offset = Lane;
2494       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2495         int OpElements =
2496             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2497         V = V.getOperand(Offset / OpElements);
2498         Offset %= OpElements;
2499       }
2500 
2501       // We need to ensure the load isn't atomic or volatile.
2502       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2503         auto *Ld = cast<LoadSDNode>(V);
2504         Offset *= SVT.getStoreSize();
2505         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2506                                                    TypeSize::Fixed(Offset), DL);
2507 
2508         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2509         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2510           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2511           SDValue IntID =
2512               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2513           SDValue Ops[] = {Ld->getChain(),
2514                            IntID,
2515                            DAG.getUNDEF(ContainerVT),
2516                            NewAddr,
2517                            DAG.getRegister(RISCV::X0, XLenVT),
2518                            VL};
2519           SDValue NewLoad = DAG.getMemIntrinsicNode(
2520               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2521               DAG.getMachineFunction().getMachineMemOperand(
2522                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2523           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2524           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2525         }
2526 
2527         // Otherwise use a scalar load and splat. This will give the best
2528         // opportunity to fold a splat into the operation. ISel can turn it into
2529         // the x0 strided load if we aren't able to fold away the select.
2530         if (SVT.isFloatingPoint())
2531           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2532                           Ld->getPointerInfo().getWithOffset(Offset),
2533                           Ld->getOriginalAlign(),
2534                           Ld->getMemOperand()->getFlags());
2535         else
2536           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2537                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2538                              Ld->getOriginalAlign(),
2539                              Ld->getMemOperand()->getFlags());
2540         DAG.makeEquivalentMemoryOrdering(Ld, V);
2541 
2542         unsigned Opc =
2543             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2544         SDValue Splat =
2545             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2546         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2547       }
2548 
2549       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2550       assert(Lane < (int)NumElts && "Unexpected lane!");
2551       SDValue Gather =
2552           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2553                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2554       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2555     }
2556   }
2557 
2558   ArrayRef<int> Mask = SVN->getMask();
2559 
2560   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2561   // be undef which can be handled with a single SLIDEDOWN/UP.
2562   int LoSrc, HiSrc;
2563   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2564   if (Rotation > 0) {
2565     SDValue LoV, HiV;
2566     if (LoSrc >= 0) {
2567       LoV = LoSrc == 0 ? V1 : V2;
2568       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2569     }
2570     if (HiSrc >= 0) {
2571       HiV = HiSrc == 0 ? V1 : V2;
2572       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2573     }
2574 
2575     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2576     // to slide LoV up by (NumElts - Rotation).
2577     unsigned InvRotate = NumElts - Rotation;
2578 
2579     SDValue Res = DAG.getUNDEF(ContainerVT);
2580     if (HiV) {
2581       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2582       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2583       // causes multiple vsetvlis in some test cases such as lowering
2584       // reduce.mul
2585       SDValue DownVL = VL;
2586       if (LoV)
2587         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2588       Res =
2589           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2590                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2591     }
2592     if (LoV)
2593       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2594                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2595 
2596     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2597   }
2598 
2599   // Detect an interleave shuffle and lower to
2600   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2601   bool SwapSources;
2602   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2603     // Swap sources if needed.
2604     if (SwapSources)
2605       std::swap(V1, V2);
2606 
2607     // Extract the lower half of the vectors.
2608     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2609     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2610                      DAG.getConstant(0, DL, XLenVT));
2611     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2612                      DAG.getConstant(0, DL, XLenVT));
2613 
2614     // Double the element width and halve the number of elements in an int type.
2615     unsigned EltBits = VT.getScalarSizeInBits();
2616     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2617     MVT WideIntVT =
2618         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2619     // Convert this to a scalable vector. We need to base this on the
2620     // destination size to ensure there's always a type with a smaller LMUL.
2621     MVT WideIntContainerVT =
2622         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2623 
2624     // Convert sources to scalable vectors with the same element count as the
2625     // larger type.
2626     MVT HalfContainerVT = MVT::getVectorVT(
2627         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2628     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2629     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2630 
2631     // Cast sources to integer.
2632     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2633     MVT IntHalfVT =
2634         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2635     V1 = DAG.getBitcast(IntHalfVT, V1);
2636     V2 = DAG.getBitcast(IntHalfVT, V2);
2637 
2638     // Freeze V2 since we use it twice and we need to be sure that the add and
2639     // multiply see the same value.
2640     V2 = DAG.getFreeze(V2);
2641 
2642     // Recreate TrueMask using the widened type's element count.
2643     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2644 
2645     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2646     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2647                               V2, TrueMask, VL);
2648     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2649     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2650                                      DAG.getUNDEF(IntHalfVT),
2651                                      DAG.getAllOnesConstant(DL, XLenVT));
2652     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2653                                    V2, Multiplier, TrueMask, VL);
2654     // Add the new copies to our previous addition giving us 2^eltbits copies of
2655     // V2. This is equivalent to shifting V2 left by eltbits. This should
2656     // combine with the vwmulu.vv above to form vwmaccu.vv.
2657     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2658                       TrueMask, VL);
2659     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2660     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2661     // vector VT.
2662     ContainerVT =
2663         MVT::getVectorVT(VT.getVectorElementType(),
2664                          WideIntContainerVT.getVectorElementCount() * 2);
2665     Add = DAG.getBitcast(ContainerVT, Add);
2666     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2667   }
2668 
2669   // Detect shuffles which can be re-expressed as vector selects; these are
2670   // shuffles in which each element in the destination is taken from an element
2671   // at the corresponding index in either source vectors.
2672   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2673     int MaskIndex = MaskIdx.value();
2674     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2675   });
2676 
2677   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2678 
2679   SmallVector<SDValue> MaskVals;
2680   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2681   // merged with a second vrgather.
2682   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2683 
2684   // By default we preserve the original operand order, and use a mask to
2685   // select LHS as true and RHS as false. However, since RVV vector selects may
2686   // feature splats but only on the LHS, we may choose to invert our mask and
2687   // instead select between RHS and LHS.
2688   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2689   bool InvertMask = IsSelect == SwapOps;
2690 
2691   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2692   // half.
2693   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2694 
2695   // Now construct the mask that will be used by the vselect or blended
2696   // vrgather operation. For vrgathers, construct the appropriate indices into
2697   // each vector.
2698   for (int MaskIndex : Mask) {
2699     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2700     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2701     if (!IsSelect) {
2702       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2703       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2704                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2705                                      : DAG.getUNDEF(XLenVT));
2706       GatherIndicesRHS.push_back(
2707           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2708                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2709       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2710         ++LHSIndexCounts[MaskIndex];
2711       if (!IsLHSOrUndefIndex)
2712         ++RHSIndexCounts[MaskIndex - NumElts];
2713     }
2714   }
2715 
2716   if (SwapOps) {
2717     std::swap(V1, V2);
2718     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2719   }
2720 
2721   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2722   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2723   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2724 
2725   if (IsSelect)
2726     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2727 
2728   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2729     // On such a large vector we're unable to use i8 as the index type.
2730     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2731     // may involve vector splitting if we're already at LMUL=8, or our
2732     // user-supplied maximum fixed-length LMUL.
2733     return SDValue();
2734   }
2735 
2736   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2737   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2738   MVT IndexVT = VT.changeTypeToInteger();
2739   // Since we can't introduce illegal index types at this stage, use i16 and
2740   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2741   // than XLenVT.
2742   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2743     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2744     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2745   }
2746 
2747   MVT IndexContainerVT =
2748       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2749 
2750   SDValue Gather;
2751   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2752   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2753   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2754     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2755                               Subtarget);
2756   } else {
2757     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2758     // If only one index is used, we can use a "splat" vrgather.
2759     // TODO: We can splat the most-common index and fix-up any stragglers, if
2760     // that's beneficial.
2761     if (LHSIndexCounts.size() == 1) {
2762       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2763       Gather =
2764           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2765                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2766     } else {
2767       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2768       LHSIndices =
2769           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2770 
2771       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2772                            TrueMask, VL);
2773     }
2774   }
2775 
2776   // If a second vector operand is used by this shuffle, blend it in with an
2777   // additional vrgather.
2778   if (!V2.isUndef()) {
2779     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2780     // If only one index is used, we can use a "splat" vrgather.
2781     // TODO: We can splat the most-common index and fix-up any stragglers, if
2782     // that's beneficial.
2783     if (RHSIndexCounts.size() == 1) {
2784       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2785       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2786                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2787     } else {
2788       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2789       RHSIndices =
2790           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2791       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2792                        VL);
2793     }
2794 
2795     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2796     SelectMask =
2797         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2798 
2799     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2800                          Gather, VL);
2801   }
2802 
2803   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2804 }
2805 
2806 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2807   // Support splats for any type. These should type legalize well.
2808   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2809     return true;
2810 
2811   // Only support legal VTs for other shuffles for now.
2812   if (!isTypeLegal(VT))
2813     return false;
2814 
2815   MVT SVT = VT.getSimpleVT();
2816 
2817   bool SwapSources;
2818   int LoSrc, HiSrc;
2819   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2820          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2821 }
2822 
2823 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2824 // the exponent.
2825 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2826   MVT VT = Op.getSimpleValueType();
2827   unsigned EltSize = VT.getScalarSizeInBits();
2828   SDValue Src = Op.getOperand(0);
2829   SDLoc DL(Op);
2830 
2831   // We need a FP type that can represent the value.
2832   // TODO: Use f16 for i8 when possible?
2833   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2834   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2835 
2836   // Legal types should have been checked in the RISCVTargetLowering
2837   // constructor.
2838   // TODO: Splitting may make sense in some cases.
2839   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2840          "Expected legal float type!");
2841 
2842   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2843   // The trailing zero count is equal to log2 of this single bit value.
2844   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2845     SDValue Neg =
2846         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2847     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2848   }
2849 
2850   // We have a legal FP type, convert to it.
2851   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2852   // Bitcast to integer and shift the exponent to the LSB.
2853   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2854   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2855   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2856   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2857                               DAG.getConstant(ShiftAmt, DL, IntVT));
2858   // Truncate back to original type to allow vnsrl.
2859   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2860   // The exponent contains log2 of the value in biased form.
2861   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2862 
2863   // For trailing zeros, we just need to subtract the bias.
2864   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2865     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2866                        DAG.getConstant(ExponentBias, DL, VT));
2867 
2868   // For leading zeros, we need to remove the bias and convert from log2 to
2869   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2870   unsigned Adjust = ExponentBias + (EltSize - 1);
2871   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2872 }
2873 
2874 // While RVV has alignment restrictions, we should always be able to load as a
2875 // legal equivalently-sized byte-typed vector instead. This method is
2876 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2877 // the load is already correctly-aligned, it returns SDValue().
2878 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2879                                                     SelectionDAG &DAG) const {
2880   auto *Load = cast<LoadSDNode>(Op);
2881   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2882 
2883   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2884                                      Load->getMemoryVT(),
2885                                      *Load->getMemOperand()))
2886     return SDValue();
2887 
2888   SDLoc DL(Op);
2889   MVT VT = Op.getSimpleValueType();
2890   unsigned EltSizeBits = VT.getScalarSizeInBits();
2891   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2892          "Unexpected unaligned RVV load type");
2893   MVT NewVT =
2894       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2895   assert(NewVT.isValid() &&
2896          "Expecting equally-sized RVV vector types to be legal");
2897   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2898                           Load->getPointerInfo(), Load->getOriginalAlign(),
2899                           Load->getMemOperand()->getFlags());
2900   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2901 }
2902 
2903 // While RVV has alignment restrictions, we should always be able to store as a
2904 // legal equivalently-sized byte-typed vector instead. This method is
2905 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2906 // returns SDValue() if the store is already correctly aligned.
2907 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2908                                                      SelectionDAG &DAG) const {
2909   auto *Store = cast<StoreSDNode>(Op);
2910   assert(Store && Store->getValue().getValueType().isVector() &&
2911          "Expected vector store");
2912 
2913   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2914                                      Store->getMemoryVT(),
2915                                      *Store->getMemOperand()))
2916     return SDValue();
2917 
2918   SDLoc DL(Op);
2919   SDValue StoredVal = Store->getValue();
2920   MVT VT = StoredVal.getSimpleValueType();
2921   unsigned EltSizeBits = VT.getScalarSizeInBits();
2922   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2923          "Unexpected unaligned RVV store type");
2924   MVT NewVT =
2925       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2926   assert(NewVT.isValid() &&
2927          "Expecting equally-sized RVV vector types to be legal");
2928   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2929   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2930                       Store->getPointerInfo(), Store->getOriginalAlign(),
2931                       Store->getMemOperand()->getFlags());
2932 }
2933 
2934 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2935                                             SelectionDAG &DAG) const {
2936   switch (Op.getOpcode()) {
2937   default:
2938     report_fatal_error("unimplemented operand");
2939   case ISD::GlobalAddress:
2940     return lowerGlobalAddress(Op, DAG);
2941   case ISD::BlockAddress:
2942     return lowerBlockAddress(Op, DAG);
2943   case ISD::ConstantPool:
2944     return lowerConstantPool(Op, DAG);
2945   case ISD::JumpTable:
2946     return lowerJumpTable(Op, DAG);
2947   case ISD::GlobalTLSAddress:
2948     return lowerGlobalTLSAddress(Op, DAG);
2949   case ISD::SELECT:
2950     return lowerSELECT(Op, DAG);
2951   case ISD::BRCOND:
2952     return lowerBRCOND(Op, DAG);
2953   case ISD::VASTART:
2954     return lowerVASTART(Op, DAG);
2955   case ISD::FRAMEADDR:
2956     return lowerFRAMEADDR(Op, DAG);
2957   case ISD::RETURNADDR:
2958     return lowerRETURNADDR(Op, DAG);
2959   case ISD::SHL_PARTS:
2960     return lowerShiftLeftParts(Op, DAG);
2961   case ISD::SRA_PARTS:
2962     return lowerShiftRightParts(Op, DAG, true);
2963   case ISD::SRL_PARTS:
2964     return lowerShiftRightParts(Op, DAG, false);
2965   case ISD::BITCAST: {
2966     SDLoc DL(Op);
2967     EVT VT = Op.getValueType();
2968     SDValue Op0 = Op.getOperand(0);
2969     EVT Op0VT = Op0.getValueType();
2970     MVT XLenVT = Subtarget.getXLenVT();
2971     if (VT.isFixedLengthVector()) {
2972       // We can handle fixed length vector bitcasts with a simple replacement
2973       // in isel.
2974       if (Op0VT.isFixedLengthVector())
2975         return Op;
2976       // When bitcasting from scalar to fixed-length vector, insert the scalar
2977       // into a one-element vector of the result type, and perform a vector
2978       // bitcast.
2979       if (!Op0VT.isVector()) {
2980         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2981         if (!isTypeLegal(BVT))
2982           return SDValue();
2983         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2984                                               DAG.getUNDEF(BVT), Op0,
2985                                               DAG.getConstant(0, DL, XLenVT)));
2986       }
2987       return SDValue();
2988     }
2989     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2990     // thus: bitcast the vector to a one-element vector type whose element type
2991     // is the same as the result type, and extract the first element.
2992     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2993       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2994       if (!isTypeLegal(BVT))
2995         return SDValue();
2996       SDValue BVec = DAG.getBitcast(BVT, Op0);
2997       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2998                          DAG.getConstant(0, DL, XLenVT));
2999     }
3000     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3001       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3002       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3003       return FPConv;
3004     }
3005     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3006         Subtarget.hasStdExtF()) {
3007       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3008       SDValue FPConv =
3009           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3010       return FPConv;
3011     }
3012     return SDValue();
3013   }
3014   case ISD::INTRINSIC_WO_CHAIN:
3015     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3016   case ISD::INTRINSIC_W_CHAIN:
3017     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3018   case ISD::INTRINSIC_VOID:
3019     return LowerINTRINSIC_VOID(Op, DAG);
3020   case ISD::BSWAP:
3021   case ISD::BITREVERSE: {
3022     MVT VT = Op.getSimpleValueType();
3023     SDLoc DL(Op);
3024     if (Subtarget.hasStdExtZbp()) {
3025       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3026       // Start with the maximum immediate value which is the bitwidth - 1.
3027       unsigned Imm = VT.getSizeInBits() - 1;
3028       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3029       if (Op.getOpcode() == ISD::BSWAP)
3030         Imm &= ~0x7U;
3031       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3032                          DAG.getConstant(Imm, DL, VT));
3033     }
3034     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3035     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3036     // Expand bitreverse to a bswap(rev8) followed by brev8.
3037     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3038     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3039     // as brev8 by an isel pattern.
3040     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3041                        DAG.getConstant(7, DL, VT));
3042   }
3043   case ISD::FSHL:
3044   case ISD::FSHR: {
3045     MVT VT = Op.getSimpleValueType();
3046     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3047     SDLoc DL(Op);
3048     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3049     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3050     // accidentally setting the extra bit.
3051     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3052     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3053                                 DAG.getConstant(ShAmtWidth, DL, VT));
3054     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3055     // instruction use different orders. fshl will return its first operand for
3056     // shift of zero, fshr will return its second operand. fsl and fsr both
3057     // return rs1 so the ISD nodes need to have different operand orders.
3058     // Shift amount is in rs2.
3059     SDValue Op0 = Op.getOperand(0);
3060     SDValue Op1 = Op.getOperand(1);
3061     unsigned Opc = RISCVISD::FSL;
3062     if (Op.getOpcode() == ISD::FSHR) {
3063       std::swap(Op0, Op1);
3064       Opc = RISCVISD::FSR;
3065     }
3066     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3067   }
3068   case ISD::TRUNCATE:
3069     // Only custom-lower vector truncates
3070     if (!Op.getSimpleValueType().isVector())
3071       return Op;
3072     return lowerVectorTruncLike(Op, DAG);
3073   case ISD::ANY_EXTEND:
3074   case ISD::ZERO_EXTEND:
3075     if (Op.getOperand(0).getValueType().isVector() &&
3076         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3077       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3078     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3079   case ISD::SIGN_EXTEND:
3080     if (Op.getOperand(0).getValueType().isVector() &&
3081         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3082       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3083     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3084   case ISD::SPLAT_VECTOR_PARTS:
3085     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3086   case ISD::INSERT_VECTOR_ELT:
3087     return lowerINSERT_VECTOR_ELT(Op, DAG);
3088   case ISD::EXTRACT_VECTOR_ELT:
3089     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3090   case ISD::VSCALE: {
3091     MVT VT = Op.getSimpleValueType();
3092     SDLoc DL(Op);
3093     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3094     // We define our scalable vector types for lmul=1 to use a 64 bit known
3095     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3096     // vscale as VLENB / 8.
3097     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3098     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3099       report_fatal_error("Support for VLEN==32 is incomplete.");
3100     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3101       // We assume VLENB is a multiple of 8. We manually choose the best shift
3102       // here because SimplifyDemandedBits isn't always able to simplify it.
3103       uint64_t Val = Op.getConstantOperandVal(0);
3104       if (isPowerOf2_64(Val)) {
3105         uint64_t Log2 = Log2_64(Val);
3106         if (Log2 < 3)
3107           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3108                              DAG.getConstant(3 - Log2, DL, VT));
3109         if (Log2 > 3)
3110           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3111                              DAG.getConstant(Log2 - 3, DL, VT));
3112         return VLENB;
3113       }
3114       // If the multiplier is a multiple of 8, scale it down to avoid needing
3115       // to shift the VLENB value.
3116       if ((Val % 8) == 0)
3117         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3118                            DAG.getConstant(Val / 8, DL, VT));
3119     }
3120 
3121     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3122                                  DAG.getConstant(3, DL, VT));
3123     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3124   }
3125   case ISD::FPOWI: {
3126     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3127     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3128     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3129         Op.getOperand(1).getValueType() == MVT::i32) {
3130       SDLoc DL(Op);
3131       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3132       SDValue Powi =
3133           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3134       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3135                          DAG.getIntPtrConstant(0, DL));
3136     }
3137     return SDValue();
3138   }
3139   case ISD::FP_EXTEND:
3140   case ISD::FP_ROUND:
3141     if (!Op.getValueType().isVector())
3142       return Op;
3143     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3144   case ISD::FP_TO_SINT:
3145   case ISD::FP_TO_UINT:
3146   case ISD::SINT_TO_FP:
3147   case ISD::UINT_TO_FP: {
3148     // RVV can only do fp<->int conversions to types half/double the size as
3149     // the source. We custom-lower any conversions that do two hops into
3150     // sequences.
3151     MVT VT = Op.getSimpleValueType();
3152     if (!VT.isVector())
3153       return Op;
3154     SDLoc DL(Op);
3155     SDValue Src = Op.getOperand(0);
3156     MVT EltVT = VT.getVectorElementType();
3157     MVT SrcVT = Src.getSimpleValueType();
3158     MVT SrcEltVT = SrcVT.getVectorElementType();
3159     unsigned EltSize = EltVT.getSizeInBits();
3160     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3161     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3162            "Unexpected vector element types");
3163 
3164     bool IsInt2FP = SrcEltVT.isInteger();
3165     // Widening conversions
3166     if (EltSize > (2 * SrcEltSize)) {
3167       if (IsInt2FP) {
3168         // Do a regular integer sign/zero extension then convert to float.
3169         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3170                                       VT.getVectorElementCount());
3171         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3172                                  ? ISD::ZERO_EXTEND
3173                                  : ISD::SIGN_EXTEND;
3174         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3175         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3176       }
3177       // FP2Int
3178       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3179       // Do one doubling fp_extend then complete the operation by converting
3180       // to int.
3181       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3182       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3183       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3184     }
3185 
3186     // Narrowing conversions
3187     if (SrcEltSize > (2 * EltSize)) {
3188       if (IsInt2FP) {
3189         // One narrowing int_to_fp, then an fp_round.
3190         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3191         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3192         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3193         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3194       }
3195       // FP2Int
3196       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3197       // representable by the integer, the result is poison.
3198       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3199                                     VT.getVectorElementCount());
3200       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3201       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3202     }
3203 
3204     // Scalable vectors can exit here. Patterns will handle equally-sized
3205     // conversions halving/doubling ones.
3206     if (!VT.isFixedLengthVector())
3207       return Op;
3208 
3209     // For fixed-length vectors we lower to a custom "VL" node.
3210     unsigned RVVOpc = 0;
3211     switch (Op.getOpcode()) {
3212     default:
3213       llvm_unreachable("Impossible opcode");
3214     case ISD::FP_TO_SINT:
3215       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3216       break;
3217     case ISD::FP_TO_UINT:
3218       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3219       break;
3220     case ISD::SINT_TO_FP:
3221       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3222       break;
3223     case ISD::UINT_TO_FP:
3224       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3225       break;
3226     }
3227 
3228     MVT ContainerVT, SrcContainerVT;
3229     // Derive the reference container type from the larger vector type.
3230     if (SrcEltSize > EltSize) {
3231       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3232       ContainerVT =
3233           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3234     } else {
3235       ContainerVT = getContainerForFixedLengthVector(VT);
3236       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3237     }
3238 
3239     SDValue Mask, VL;
3240     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3241 
3242     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3243     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3244     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3245   }
3246   case ISD::FP_TO_SINT_SAT:
3247   case ISD::FP_TO_UINT_SAT:
3248     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3249   case ISD::FTRUNC:
3250   case ISD::FCEIL:
3251   case ISD::FFLOOR:
3252     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3253   case ISD::FROUND:
3254     return lowerFROUND(Op, DAG);
3255   case ISD::VECREDUCE_ADD:
3256   case ISD::VECREDUCE_UMAX:
3257   case ISD::VECREDUCE_SMAX:
3258   case ISD::VECREDUCE_UMIN:
3259   case ISD::VECREDUCE_SMIN:
3260     return lowerVECREDUCE(Op, DAG);
3261   case ISD::VECREDUCE_AND:
3262   case ISD::VECREDUCE_OR:
3263   case ISD::VECREDUCE_XOR:
3264     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3265       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3266     return lowerVECREDUCE(Op, DAG);
3267   case ISD::VECREDUCE_FADD:
3268   case ISD::VECREDUCE_SEQ_FADD:
3269   case ISD::VECREDUCE_FMIN:
3270   case ISD::VECREDUCE_FMAX:
3271     return lowerFPVECREDUCE(Op, DAG);
3272   case ISD::VP_REDUCE_ADD:
3273   case ISD::VP_REDUCE_UMAX:
3274   case ISD::VP_REDUCE_SMAX:
3275   case ISD::VP_REDUCE_UMIN:
3276   case ISD::VP_REDUCE_SMIN:
3277   case ISD::VP_REDUCE_FADD:
3278   case ISD::VP_REDUCE_SEQ_FADD:
3279   case ISD::VP_REDUCE_FMIN:
3280   case ISD::VP_REDUCE_FMAX:
3281     return lowerVPREDUCE(Op, DAG);
3282   case ISD::VP_REDUCE_AND:
3283   case ISD::VP_REDUCE_OR:
3284   case ISD::VP_REDUCE_XOR:
3285     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3286       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3287     return lowerVPREDUCE(Op, DAG);
3288   case ISD::INSERT_SUBVECTOR:
3289     return lowerINSERT_SUBVECTOR(Op, DAG);
3290   case ISD::EXTRACT_SUBVECTOR:
3291     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3292   case ISD::STEP_VECTOR:
3293     return lowerSTEP_VECTOR(Op, DAG);
3294   case ISD::VECTOR_REVERSE:
3295     return lowerVECTOR_REVERSE(Op, DAG);
3296   case ISD::VECTOR_SPLICE:
3297     return lowerVECTOR_SPLICE(Op, DAG);
3298   case ISD::BUILD_VECTOR:
3299     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3300   case ISD::SPLAT_VECTOR:
3301     if (Op.getValueType().getVectorElementType() == MVT::i1)
3302       return lowerVectorMaskSplat(Op, DAG);
3303     return SDValue();
3304   case ISD::VECTOR_SHUFFLE:
3305     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3306   case ISD::CONCAT_VECTORS: {
3307     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3308     // better than going through the stack, as the default expansion does.
3309     SDLoc DL(Op);
3310     MVT VT = Op.getSimpleValueType();
3311     unsigned NumOpElts =
3312         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3313     SDValue Vec = DAG.getUNDEF(VT);
3314     for (const auto &OpIdx : enumerate(Op->ops())) {
3315       SDValue SubVec = OpIdx.value();
3316       // Don't insert undef subvectors.
3317       if (SubVec.isUndef())
3318         continue;
3319       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3320                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3321     }
3322     return Vec;
3323   }
3324   case ISD::LOAD:
3325     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3326       return V;
3327     if (Op.getValueType().isFixedLengthVector())
3328       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3329     return Op;
3330   case ISD::STORE:
3331     if (auto V = expandUnalignedRVVStore(Op, DAG))
3332       return V;
3333     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3334       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3335     return Op;
3336   case ISD::MLOAD:
3337   case ISD::VP_LOAD:
3338     return lowerMaskedLoad(Op, DAG);
3339   case ISD::MSTORE:
3340   case ISD::VP_STORE:
3341     return lowerMaskedStore(Op, DAG);
3342   case ISD::SETCC:
3343     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3344   case ISD::ADD:
3345     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3346   case ISD::SUB:
3347     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3348   case ISD::MUL:
3349     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3350   case ISD::MULHS:
3351     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3352   case ISD::MULHU:
3353     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3354   case ISD::AND:
3355     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3356                                               RISCVISD::AND_VL);
3357   case ISD::OR:
3358     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3359                                               RISCVISD::OR_VL);
3360   case ISD::XOR:
3361     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3362                                               RISCVISD::XOR_VL);
3363   case ISD::SDIV:
3364     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3365   case ISD::SREM:
3366     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3367   case ISD::UDIV:
3368     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3369   case ISD::UREM:
3370     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3371   case ISD::SHL:
3372   case ISD::SRA:
3373   case ISD::SRL:
3374     if (Op.getSimpleValueType().isFixedLengthVector())
3375       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3376     // This can be called for an i32 shift amount that needs to be promoted.
3377     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3378            "Unexpected custom legalisation");
3379     return SDValue();
3380   case ISD::SADDSAT:
3381     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3382   case ISD::UADDSAT:
3383     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3384   case ISD::SSUBSAT:
3385     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3386   case ISD::USUBSAT:
3387     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3388   case ISD::FADD:
3389     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3390   case ISD::FSUB:
3391     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3392   case ISD::FMUL:
3393     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3394   case ISD::FDIV:
3395     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3396   case ISD::FNEG:
3397     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3398   case ISD::FABS:
3399     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3400   case ISD::FSQRT:
3401     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3402   case ISD::FMA:
3403     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3404   case ISD::SMIN:
3405     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3406   case ISD::SMAX:
3407     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3408   case ISD::UMIN:
3409     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3410   case ISD::UMAX:
3411     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3412   case ISD::FMINNUM:
3413     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3414   case ISD::FMAXNUM:
3415     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3416   case ISD::ABS:
3417     return lowerABS(Op, DAG);
3418   case ISD::CTLZ_ZERO_UNDEF:
3419   case ISD::CTTZ_ZERO_UNDEF:
3420     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3421   case ISD::VSELECT:
3422     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3423   case ISD::FCOPYSIGN:
3424     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3425   case ISD::MGATHER:
3426   case ISD::VP_GATHER:
3427     return lowerMaskedGather(Op, DAG);
3428   case ISD::MSCATTER:
3429   case ISD::VP_SCATTER:
3430     return lowerMaskedScatter(Op, DAG);
3431   case ISD::FLT_ROUNDS_:
3432     return lowerGET_ROUNDING(Op, DAG);
3433   case ISD::SET_ROUNDING:
3434     return lowerSET_ROUNDING(Op, DAG);
3435   case ISD::VP_SELECT:
3436     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3437   case ISD::VP_MERGE:
3438     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3439   case ISD::VP_ADD:
3440     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3441   case ISD::VP_SUB:
3442     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3443   case ISD::VP_MUL:
3444     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3445   case ISD::VP_SDIV:
3446     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3447   case ISD::VP_UDIV:
3448     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3449   case ISD::VP_SREM:
3450     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3451   case ISD::VP_UREM:
3452     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3453   case ISD::VP_AND:
3454     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3455   case ISD::VP_OR:
3456     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3457   case ISD::VP_XOR:
3458     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3459   case ISD::VP_ASHR:
3460     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3461   case ISD::VP_LSHR:
3462     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3463   case ISD::VP_SHL:
3464     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3465   case ISD::VP_FADD:
3466     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3467   case ISD::VP_FSUB:
3468     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3469   case ISD::VP_FMUL:
3470     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3471   case ISD::VP_FDIV:
3472     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3473   case ISD::VP_FNEG:
3474     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3475   case ISD::VP_FMA:
3476     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3477   case ISD::VP_SIGN_EXTEND:
3478   case ISD::VP_ZERO_EXTEND:
3479     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3480       return lowerVPExtMaskOp(Op, DAG);
3481     return lowerVPOp(Op, DAG,
3482                      Op.getOpcode() == ISD::VP_SIGN_EXTEND
3483                          ? RISCVISD::VSEXT_VL
3484                          : RISCVISD::VZEXT_VL);
3485   case ISD::VP_TRUNCATE:
3486     return lowerVectorTruncLike(Op, DAG);
3487   case ISD::VP_FP_EXTEND:
3488   case ISD::VP_FP_ROUND:
3489     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3490   case ISD::VP_FPTOSI:
3491     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3492   case ISD::VP_FPTOUI:
3493     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3494   case ISD::VP_SITOFP:
3495     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3496   case ISD::VP_UITOFP:
3497     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3498   case ISD::VP_SETCC:
3499     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3500       return lowerVPSetCCMaskOp(Op, DAG);
3501     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3502   }
3503 }
3504 
3505 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3506                              SelectionDAG &DAG, unsigned Flags) {
3507   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3508 }
3509 
3510 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3511                              SelectionDAG &DAG, unsigned Flags) {
3512   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3513                                    Flags);
3514 }
3515 
3516 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3517                              SelectionDAG &DAG, unsigned Flags) {
3518   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3519                                    N->getOffset(), Flags);
3520 }
3521 
3522 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3523                              SelectionDAG &DAG, unsigned Flags) {
3524   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3525 }
3526 
3527 template <class NodeTy>
3528 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3529                                      bool IsLocal) const {
3530   SDLoc DL(N);
3531   EVT Ty = getPointerTy(DAG.getDataLayout());
3532 
3533   if (isPositionIndependent()) {
3534     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3535     if (IsLocal)
3536       // Use PC-relative addressing to access the symbol. This generates the
3537       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3538       // %pcrel_lo(auipc)).
3539       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3540 
3541     // Use PC-relative addressing to access the GOT for this symbol, then load
3542     // the address from the GOT. This generates the pattern (PseudoLA sym),
3543     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3544     SDValue Load =
3545         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3546     MachineFunction &MF = DAG.getMachineFunction();
3547     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3548         MachinePointerInfo::getGOT(MF),
3549         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3550             MachineMemOperand::MOInvariant,
3551         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3552     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3553     return Load;
3554   }
3555 
3556   switch (getTargetMachine().getCodeModel()) {
3557   default:
3558     report_fatal_error("Unsupported code model for lowering");
3559   case CodeModel::Small: {
3560     // Generate a sequence for accessing addresses within the first 2 GiB of
3561     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3562     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3563     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3564     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3565     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3566   }
3567   case CodeModel::Medium: {
3568     // Generate a sequence for accessing addresses within any 2GiB range within
3569     // the address space. This generates the pattern (PseudoLLA sym), which
3570     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3571     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3572     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3573   }
3574   }
3575 }
3576 
3577 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3578     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3579 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3580     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3581 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3582     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3583 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3584     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3585 
3586 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3587                                                 SelectionDAG &DAG) const {
3588   SDLoc DL(Op);
3589   EVT Ty = Op.getValueType();
3590   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3591   int64_t Offset = N->getOffset();
3592   MVT XLenVT = Subtarget.getXLenVT();
3593 
3594   const GlobalValue *GV = N->getGlobal();
3595   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3596   SDValue Addr = getAddr(N, DAG, IsLocal);
3597 
3598   // In order to maximise the opportunity for common subexpression elimination,
3599   // emit a separate ADD node for the global address offset instead of folding
3600   // it in the global address node. Later peephole optimisations may choose to
3601   // fold it back in when profitable.
3602   if (Offset != 0)
3603     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3604                        DAG.getConstant(Offset, DL, XLenVT));
3605   return Addr;
3606 }
3607 
3608 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3609                                                SelectionDAG &DAG) const {
3610   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3611 
3612   return getAddr(N, DAG);
3613 }
3614 
3615 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3616                                                SelectionDAG &DAG) const {
3617   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3618 
3619   return getAddr(N, DAG);
3620 }
3621 
3622 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3623                                             SelectionDAG &DAG) const {
3624   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3625 
3626   return getAddr(N, DAG);
3627 }
3628 
3629 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3630                                               SelectionDAG &DAG,
3631                                               bool UseGOT) const {
3632   SDLoc DL(N);
3633   EVT Ty = getPointerTy(DAG.getDataLayout());
3634   const GlobalValue *GV = N->getGlobal();
3635   MVT XLenVT = Subtarget.getXLenVT();
3636 
3637   if (UseGOT) {
3638     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3639     // load the address from the GOT and add the thread pointer. This generates
3640     // the pattern (PseudoLA_TLS_IE sym), which expands to
3641     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3642     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3643     SDValue Load =
3644         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3645     MachineFunction &MF = DAG.getMachineFunction();
3646     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3647         MachinePointerInfo::getGOT(MF),
3648         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3649             MachineMemOperand::MOInvariant,
3650         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3651     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3652 
3653     // Add the thread pointer.
3654     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3655     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3656   }
3657 
3658   // Generate a sequence for accessing the address relative to the thread
3659   // pointer, with the appropriate adjustment for the thread pointer offset.
3660   // This generates the pattern
3661   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3662   SDValue AddrHi =
3663       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3664   SDValue AddrAdd =
3665       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3666   SDValue AddrLo =
3667       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3668 
3669   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3670   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3671   SDValue MNAdd = SDValue(
3672       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3673       0);
3674   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3675 }
3676 
3677 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3678                                                SelectionDAG &DAG) const {
3679   SDLoc DL(N);
3680   EVT Ty = getPointerTy(DAG.getDataLayout());
3681   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3682   const GlobalValue *GV = N->getGlobal();
3683 
3684   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3685   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3686   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3687   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3688   SDValue Load =
3689       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3690 
3691   // Prepare argument list to generate call.
3692   ArgListTy Args;
3693   ArgListEntry Entry;
3694   Entry.Node = Load;
3695   Entry.Ty = CallTy;
3696   Args.push_back(Entry);
3697 
3698   // Setup call to __tls_get_addr.
3699   TargetLowering::CallLoweringInfo CLI(DAG);
3700   CLI.setDebugLoc(DL)
3701       .setChain(DAG.getEntryNode())
3702       .setLibCallee(CallingConv::C, CallTy,
3703                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3704                     std::move(Args));
3705 
3706   return LowerCallTo(CLI).first;
3707 }
3708 
3709 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3710                                                    SelectionDAG &DAG) const {
3711   SDLoc DL(Op);
3712   EVT Ty = Op.getValueType();
3713   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3714   int64_t Offset = N->getOffset();
3715   MVT XLenVT = Subtarget.getXLenVT();
3716 
3717   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3718 
3719   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3720       CallingConv::GHC)
3721     report_fatal_error("In GHC calling convention TLS is not supported");
3722 
3723   SDValue Addr;
3724   switch (Model) {
3725   case TLSModel::LocalExec:
3726     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3727     break;
3728   case TLSModel::InitialExec:
3729     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3730     break;
3731   case TLSModel::LocalDynamic:
3732   case TLSModel::GeneralDynamic:
3733     Addr = getDynamicTLSAddr(N, DAG);
3734     break;
3735   }
3736 
3737   // In order to maximise the opportunity for common subexpression elimination,
3738   // emit a separate ADD node for the global address offset instead of folding
3739   // it in the global address node. Later peephole optimisations may choose to
3740   // fold it back in when profitable.
3741   if (Offset != 0)
3742     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3743                        DAG.getConstant(Offset, DL, XLenVT));
3744   return Addr;
3745 }
3746 
3747 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3748   SDValue CondV = Op.getOperand(0);
3749   SDValue TrueV = Op.getOperand(1);
3750   SDValue FalseV = Op.getOperand(2);
3751   SDLoc DL(Op);
3752   MVT VT = Op.getSimpleValueType();
3753   MVT XLenVT = Subtarget.getXLenVT();
3754 
3755   // Lower vector SELECTs to VSELECTs by splatting the condition.
3756   if (VT.isVector()) {
3757     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3758     SDValue CondSplat = VT.isScalableVector()
3759                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3760                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3761     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3762   }
3763 
3764   // If the result type is XLenVT and CondV is the output of a SETCC node
3765   // which also operated on XLenVT inputs, then merge the SETCC node into the
3766   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3767   // compare+branch instructions. i.e.:
3768   // (select (setcc lhs, rhs, cc), truev, falsev)
3769   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3770   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3771       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3772     SDValue LHS = CondV.getOperand(0);
3773     SDValue RHS = CondV.getOperand(1);
3774     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3775     ISD::CondCode CCVal = CC->get();
3776 
3777     // Special case for a select of 2 constants that have a diffence of 1.
3778     // Normally this is done by DAGCombine, but if the select is introduced by
3779     // type legalization or op legalization, we miss it. Restricting to SETLT
3780     // case for now because that is what signed saturating add/sub need.
3781     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3782     // but we would probably want to swap the true/false values if the condition
3783     // is SETGE/SETLE to avoid an XORI.
3784     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3785         CCVal == ISD::SETLT) {
3786       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3787       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3788       if (TrueVal - 1 == FalseVal)
3789         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3790       if (TrueVal + 1 == FalseVal)
3791         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3792     }
3793 
3794     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3795 
3796     SDValue TargetCC = DAG.getCondCode(CCVal);
3797     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3798     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3799   }
3800 
3801   // Otherwise:
3802   // (select condv, truev, falsev)
3803   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3804   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3805   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3806 
3807   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3808 
3809   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3810 }
3811 
3812 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3813   SDValue CondV = Op.getOperand(1);
3814   SDLoc DL(Op);
3815   MVT XLenVT = Subtarget.getXLenVT();
3816 
3817   if (CondV.getOpcode() == ISD::SETCC &&
3818       CondV.getOperand(0).getValueType() == XLenVT) {
3819     SDValue LHS = CondV.getOperand(0);
3820     SDValue RHS = CondV.getOperand(1);
3821     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3822 
3823     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3824 
3825     SDValue TargetCC = DAG.getCondCode(CCVal);
3826     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3827                        LHS, RHS, TargetCC, Op.getOperand(2));
3828   }
3829 
3830   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3831                      CondV, DAG.getConstant(0, DL, XLenVT),
3832                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3833 }
3834 
3835 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3836   MachineFunction &MF = DAG.getMachineFunction();
3837   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3838 
3839   SDLoc DL(Op);
3840   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3841                                  getPointerTy(MF.getDataLayout()));
3842 
3843   // vastart just stores the address of the VarArgsFrameIndex slot into the
3844   // memory location argument.
3845   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3846   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3847                       MachinePointerInfo(SV));
3848 }
3849 
3850 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3851                                             SelectionDAG &DAG) const {
3852   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3853   MachineFunction &MF = DAG.getMachineFunction();
3854   MachineFrameInfo &MFI = MF.getFrameInfo();
3855   MFI.setFrameAddressIsTaken(true);
3856   Register FrameReg = RI.getFrameRegister(MF);
3857   int XLenInBytes = Subtarget.getXLen() / 8;
3858 
3859   EVT VT = Op.getValueType();
3860   SDLoc DL(Op);
3861   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3862   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3863   while (Depth--) {
3864     int Offset = -(XLenInBytes * 2);
3865     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3866                               DAG.getIntPtrConstant(Offset, DL));
3867     FrameAddr =
3868         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3869   }
3870   return FrameAddr;
3871 }
3872 
3873 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3874                                              SelectionDAG &DAG) const {
3875   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3876   MachineFunction &MF = DAG.getMachineFunction();
3877   MachineFrameInfo &MFI = MF.getFrameInfo();
3878   MFI.setReturnAddressIsTaken(true);
3879   MVT XLenVT = Subtarget.getXLenVT();
3880   int XLenInBytes = Subtarget.getXLen() / 8;
3881 
3882   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3883     return SDValue();
3884 
3885   EVT VT = Op.getValueType();
3886   SDLoc DL(Op);
3887   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3888   if (Depth) {
3889     int Off = -XLenInBytes;
3890     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3891     SDValue Offset = DAG.getConstant(Off, DL, VT);
3892     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3893                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3894                        MachinePointerInfo());
3895   }
3896 
3897   // Return the value of the return address register, marking it an implicit
3898   // live-in.
3899   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3900   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3901 }
3902 
3903 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3904                                                  SelectionDAG &DAG) const {
3905   SDLoc DL(Op);
3906   SDValue Lo = Op.getOperand(0);
3907   SDValue Hi = Op.getOperand(1);
3908   SDValue Shamt = Op.getOperand(2);
3909   EVT VT = Lo.getValueType();
3910 
3911   // if Shamt-XLEN < 0: // Shamt < XLEN
3912   //   Lo = Lo << Shamt
3913   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3914   // else:
3915   //   Lo = 0
3916   //   Hi = Lo << (Shamt-XLEN)
3917 
3918   SDValue Zero = DAG.getConstant(0, DL, VT);
3919   SDValue One = DAG.getConstant(1, DL, VT);
3920   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3921   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3922   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3923   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3924 
3925   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3926   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3927   SDValue ShiftRightLo =
3928       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3929   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3930   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3931   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3932 
3933   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3934 
3935   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3936   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3937 
3938   SDValue Parts[2] = {Lo, Hi};
3939   return DAG.getMergeValues(Parts, DL);
3940 }
3941 
3942 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3943                                                   bool IsSRA) const {
3944   SDLoc DL(Op);
3945   SDValue Lo = Op.getOperand(0);
3946   SDValue Hi = Op.getOperand(1);
3947   SDValue Shamt = Op.getOperand(2);
3948   EVT VT = Lo.getValueType();
3949 
3950   // SRA expansion:
3951   //   if Shamt-XLEN < 0: // Shamt < XLEN
3952   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3953   //     Hi = Hi >>s Shamt
3954   //   else:
3955   //     Lo = Hi >>s (Shamt-XLEN);
3956   //     Hi = Hi >>s (XLEN-1)
3957   //
3958   // SRL expansion:
3959   //   if Shamt-XLEN < 0: // Shamt < XLEN
3960   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3961   //     Hi = Hi >>u Shamt
3962   //   else:
3963   //     Lo = Hi >>u (Shamt-XLEN);
3964   //     Hi = 0;
3965 
3966   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3967 
3968   SDValue Zero = DAG.getConstant(0, DL, VT);
3969   SDValue One = DAG.getConstant(1, DL, VT);
3970   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3971   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3972   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3973   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3974 
3975   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3976   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3977   SDValue ShiftLeftHi =
3978       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3979   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3980   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3981   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3982   SDValue HiFalse =
3983       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3984 
3985   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3986 
3987   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3988   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3989 
3990   SDValue Parts[2] = {Lo, Hi};
3991   return DAG.getMergeValues(Parts, DL);
3992 }
3993 
3994 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3995 // legal equivalently-sized i8 type, so we can use that as a go-between.
3996 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3997                                                   SelectionDAG &DAG) const {
3998   SDLoc DL(Op);
3999   MVT VT = Op.getSimpleValueType();
4000   SDValue SplatVal = Op.getOperand(0);
4001   // All-zeros or all-ones splats are handled specially.
4002   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4003     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4004     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4005   }
4006   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4007     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4008     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4009   }
4010   MVT XLenVT = Subtarget.getXLenVT();
4011   assert(SplatVal.getValueType() == XLenVT &&
4012          "Unexpected type for i1 splat value");
4013   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4014   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4015                          DAG.getConstant(1, DL, XLenVT));
4016   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4017   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4018   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4019 }
4020 
4021 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4022 // illegal (currently only vXi64 RV32).
4023 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4024 // them to VMV_V_X_VL.
4025 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4026                                                      SelectionDAG &DAG) const {
4027   SDLoc DL(Op);
4028   MVT VecVT = Op.getSimpleValueType();
4029   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4030          "Unexpected SPLAT_VECTOR_PARTS lowering");
4031 
4032   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4033   SDValue Lo = Op.getOperand(0);
4034   SDValue Hi = Op.getOperand(1);
4035 
4036   if (VecVT.isFixedLengthVector()) {
4037     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4038     SDLoc DL(Op);
4039     SDValue Mask, VL;
4040     std::tie(Mask, VL) =
4041         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4042 
4043     SDValue Res =
4044         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4045     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4046   }
4047 
4048   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4049     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4050     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4051     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4052     // node in order to try and match RVV vector/scalar instructions.
4053     if ((LoC >> 31) == HiC)
4054       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4055                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4056   }
4057 
4058   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4059   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4060       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4061       Hi.getConstantOperandVal(1) == 31)
4062     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4063                        DAG.getRegister(RISCV::X0, MVT::i32));
4064 
4065   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4066   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4067                      DAG.getUNDEF(VecVT), Lo, Hi,
4068                      DAG.getRegister(RISCV::X0, MVT::i32));
4069 }
4070 
4071 // Custom-lower extensions from mask vectors by using a vselect either with 1
4072 // for zero/any-extension or -1 for sign-extension:
4073 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4074 // Note that any-extension is lowered identically to zero-extension.
4075 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4076                                                 int64_t ExtTrueVal) const {
4077   SDLoc DL(Op);
4078   MVT VecVT = Op.getSimpleValueType();
4079   SDValue Src = Op.getOperand(0);
4080   // Only custom-lower extensions from mask types
4081   assert(Src.getValueType().isVector() &&
4082          Src.getValueType().getVectorElementType() == MVT::i1);
4083 
4084   if (VecVT.isScalableVector()) {
4085     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4086     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4087     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4088   }
4089 
4090   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4091   MVT I1ContainerVT =
4092       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4093 
4094   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4095 
4096   SDValue Mask, VL;
4097   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4098 
4099   MVT XLenVT = Subtarget.getXLenVT();
4100   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4101   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4102 
4103   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4104                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4105   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4106                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4107   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4108                                SplatTrueVal, SplatZero, VL);
4109 
4110   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4111 }
4112 
4113 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4114     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4115   MVT ExtVT = Op.getSimpleValueType();
4116   // Only custom-lower extensions from fixed-length vector types.
4117   if (!ExtVT.isFixedLengthVector())
4118     return Op;
4119   MVT VT = Op.getOperand(0).getSimpleValueType();
4120   // Grab the canonical container type for the extended type. Infer the smaller
4121   // type from that to ensure the same number of vector elements, as we know
4122   // the LMUL will be sufficient to hold the smaller type.
4123   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4124   // Get the extended container type manually to ensure the same number of
4125   // vector elements between source and dest.
4126   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4127                                      ContainerExtVT.getVectorElementCount());
4128 
4129   SDValue Op1 =
4130       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4131 
4132   SDLoc DL(Op);
4133   SDValue Mask, VL;
4134   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4135 
4136   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4137 
4138   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4139 }
4140 
4141 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4142 // setcc operation:
4143 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4144 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4145                                                       SelectionDAG &DAG) const {
4146   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4147   SDLoc DL(Op);
4148   EVT MaskVT = Op.getValueType();
4149   // Only expect to custom-lower truncations to mask types
4150   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4151          "Unexpected type for vector mask lowering");
4152   SDValue Src = Op.getOperand(0);
4153   MVT VecVT = Src.getSimpleValueType();
4154   SDValue Mask, VL;
4155   if (IsVPTrunc) {
4156     Mask = Op.getOperand(1);
4157     VL = Op.getOperand(2);
4158   }
4159   // If this is a fixed vector, we need to convert it to a scalable vector.
4160   MVT ContainerVT = VecVT;
4161 
4162   if (VecVT.isFixedLengthVector()) {
4163     ContainerVT = getContainerForFixedLengthVector(VecVT);
4164     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4165     if (IsVPTrunc) {
4166       MVT MaskContainerVT =
4167           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4168       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4169     }
4170   }
4171 
4172   if (!IsVPTrunc) {
4173     std::tie(Mask, VL) =
4174         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4175   }
4176 
4177   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4178   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4179 
4180   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4181                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4182   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4183                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4184 
4185   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4186   SDValue Trunc =
4187       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4188   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4189                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4190   if (MaskVT.isFixedLengthVector())
4191     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4192   return Trunc;
4193 }
4194 
4195 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4196                                                   SelectionDAG &DAG) const {
4197   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4198   SDLoc DL(Op);
4199 
4200   MVT VT = Op.getSimpleValueType();
4201   // Only custom-lower vector truncates
4202   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4203 
4204   // Truncates to mask types are handled differently
4205   if (VT.getVectorElementType() == MVT::i1)
4206     return lowerVectorMaskTruncLike(Op, DAG);
4207 
4208   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4209   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4210   // truncate by one power of two at a time.
4211   MVT DstEltVT = VT.getVectorElementType();
4212 
4213   SDValue Src = Op.getOperand(0);
4214   MVT SrcVT = Src.getSimpleValueType();
4215   MVT SrcEltVT = SrcVT.getVectorElementType();
4216 
4217   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4218          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4219          "Unexpected vector truncate lowering");
4220 
4221   MVT ContainerVT = SrcVT;
4222   SDValue Mask, VL;
4223   if (IsVPTrunc) {
4224     Mask = Op.getOperand(1);
4225     VL = Op.getOperand(2);
4226   }
4227   if (SrcVT.isFixedLengthVector()) {
4228     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4229     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4230     if (IsVPTrunc) {
4231       MVT MaskVT = getMaskTypeFor(ContainerVT);
4232       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4233     }
4234   }
4235 
4236   SDValue Result = Src;
4237   if (!IsVPTrunc) {
4238     std::tie(Mask, VL) =
4239         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4240   }
4241 
4242   LLVMContext &Context = *DAG.getContext();
4243   const ElementCount Count = ContainerVT.getVectorElementCount();
4244   do {
4245     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4246     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4247     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4248                          Mask, VL);
4249   } while (SrcEltVT != DstEltVT);
4250 
4251   if (SrcVT.isFixedLengthVector())
4252     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4253 
4254   return Result;
4255 }
4256 
4257 SDValue
4258 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
4259                                                     SelectionDAG &DAG) const {
4260   bool IsVP =
4261       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
4262   bool IsExtend =
4263       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
4264   // RVV can only do truncate fp to types half the size as the source. We
4265   // custom-lower f64->f16 rounds via RVV's round-to-odd float
4266   // conversion instruction.
4267   SDLoc DL(Op);
4268   MVT VT = Op.getSimpleValueType();
4269 
4270   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4271 
4272   SDValue Src = Op.getOperand(0);
4273   MVT SrcVT = Src.getSimpleValueType();
4274 
4275   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
4276                                      SrcVT.getVectorElementType() != MVT::f16);
4277   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
4278                                      SrcVT.getVectorElementType() != MVT::f64);
4279 
4280   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
4281 
4282   // For FP_ROUND/FP_EXTEND of scalable vectors, leave it to the pattern.
4283   if (!VT.isFixedLengthVector() && !IsVP && IsDirectConv)
4284     return Op;
4285 
4286   // Prepare any fixed-length vector operands.
4287   MVT ContainerVT = VT;
4288   SDValue Mask, VL;
4289   if (IsVP) {
4290     Mask = Op.getOperand(1);
4291     VL = Op.getOperand(2);
4292   }
4293   if (VT.isFixedLengthVector()) {
4294     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4295     ContainerVT =
4296         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4297     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4298     if (IsVP) {
4299       MVT MaskVT = getMaskTypeFor(ContainerVT);
4300       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4301     }
4302   }
4303 
4304   if (!IsVP)
4305     std::tie(Mask, VL) =
4306         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4307 
4308   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
4309 
4310   if (IsDirectConv) {
4311     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
4312     if (VT.isFixedLengthVector())
4313       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4314     return Src;
4315   }
4316 
4317   unsigned InterConvOpc =
4318       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
4319 
4320   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4321   SDValue IntermediateConv =
4322       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
4323   SDValue Result =
4324       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
4325   if (VT.isFixedLengthVector())
4326     return convertFromScalableVector(VT, Result, DAG, Subtarget);
4327   return Result;
4328 }
4329 
4330 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4331 // first position of a vector, and that vector is slid up to the insert index.
4332 // By limiting the active vector length to index+1 and merging with the
4333 // original vector (with an undisturbed tail policy for elements >= VL), we
4334 // achieve the desired result of leaving all elements untouched except the one
4335 // at VL-1, which is replaced with the desired value.
4336 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4337                                                     SelectionDAG &DAG) const {
4338   SDLoc DL(Op);
4339   MVT VecVT = Op.getSimpleValueType();
4340   SDValue Vec = Op.getOperand(0);
4341   SDValue Val = Op.getOperand(1);
4342   SDValue Idx = Op.getOperand(2);
4343 
4344   if (VecVT.getVectorElementType() == MVT::i1) {
4345     // FIXME: For now we just promote to an i8 vector and insert into that,
4346     // but this is probably not optimal.
4347     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4348     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4349     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4350     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4351   }
4352 
4353   MVT ContainerVT = VecVT;
4354   // If the operand is a fixed-length vector, convert to a scalable one.
4355   if (VecVT.isFixedLengthVector()) {
4356     ContainerVT = getContainerForFixedLengthVector(VecVT);
4357     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4358   }
4359 
4360   MVT XLenVT = Subtarget.getXLenVT();
4361 
4362   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4363   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4364   // Even i64-element vectors on RV32 can be lowered without scalar
4365   // legalization if the most-significant 32 bits of the value are not affected
4366   // by the sign-extension of the lower 32 bits.
4367   // TODO: We could also catch sign extensions of a 32-bit value.
4368   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4369     const auto *CVal = cast<ConstantSDNode>(Val);
4370     if (isInt<32>(CVal->getSExtValue())) {
4371       IsLegalInsert = true;
4372       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4373     }
4374   }
4375 
4376   SDValue Mask, VL;
4377   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4378 
4379   SDValue ValInVec;
4380 
4381   if (IsLegalInsert) {
4382     unsigned Opc =
4383         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4384     if (isNullConstant(Idx)) {
4385       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4386       if (!VecVT.isFixedLengthVector())
4387         return Vec;
4388       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4389     }
4390     ValInVec =
4391         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4392   } else {
4393     // On RV32, i64-element vectors must be specially handled to place the
4394     // value at element 0, by using two vslide1up instructions in sequence on
4395     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4396     // this.
4397     SDValue One = DAG.getConstant(1, DL, XLenVT);
4398     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4399     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4400     MVT I32ContainerVT =
4401         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4402     SDValue I32Mask =
4403         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4404     // Limit the active VL to two.
4405     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4406     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4407     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4408     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4409                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4410     // First slide in the hi value, then the lo in underneath it.
4411     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4412                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4413                            I32Mask, InsertI64VL);
4414     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4415                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4416                            I32Mask, InsertI64VL);
4417     // Bitcast back to the right container type.
4418     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4419   }
4420 
4421   // Now that the value is in a vector, slide it into position.
4422   SDValue InsertVL =
4423       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4424   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4425                                 ValInVec, Idx, Mask, InsertVL);
4426   if (!VecVT.isFixedLengthVector())
4427     return Slideup;
4428   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4429 }
4430 
4431 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4432 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4433 // types this is done using VMV_X_S to allow us to glean information about the
4434 // sign bits of the result.
4435 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4436                                                      SelectionDAG &DAG) const {
4437   SDLoc DL(Op);
4438   SDValue Idx = Op.getOperand(1);
4439   SDValue Vec = Op.getOperand(0);
4440   EVT EltVT = Op.getValueType();
4441   MVT VecVT = Vec.getSimpleValueType();
4442   MVT XLenVT = Subtarget.getXLenVT();
4443 
4444   if (VecVT.getVectorElementType() == MVT::i1) {
4445     if (VecVT.isFixedLengthVector()) {
4446       unsigned NumElts = VecVT.getVectorNumElements();
4447       if (NumElts >= 8) {
4448         MVT WideEltVT;
4449         unsigned WidenVecLen;
4450         SDValue ExtractElementIdx;
4451         SDValue ExtractBitIdx;
4452         unsigned MaxEEW = Subtarget.getELEN();
4453         MVT LargestEltVT = MVT::getIntegerVT(
4454             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4455         if (NumElts <= LargestEltVT.getSizeInBits()) {
4456           assert(isPowerOf2_32(NumElts) &&
4457                  "the number of elements should be power of 2");
4458           WideEltVT = MVT::getIntegerVT(NumElts);
4459           WidenVecLen = 1;
4460           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4461           ExtractBitIdx = Idx;
4462         } else {
4463           WideEltVT = LargestEltVT;
4464           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4465           // extract element index = index / element width
4466           ExtractElementIdx = DAG.getNode(
4467               ISD::SRL, DL, XLenVT, Idx,
4468               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4469           // mask bit index = index % element width
4470           ExtractBitIdx = DAG.getNode(
4471               ISD::AND, DL, XLenVT, Idx,
4472               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4473         }
4474         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4475         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4476         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4477                                          Vec, ExtractElementIdx);
4478         // Extract the bit from GPR.
4479         SDValue ShiftRight =
4480             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4481         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4482                            DAG.getConstant(1, DL, XLenVT));
4483       }
4484     }
4485     // Otherwise, promote to an i8 vector and extract from that.
4486     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4487     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4488     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4489   }
4490 
4491   // If this is a fixed vector, we need to convert it to a scalable vector.
4492   MVT ContainerVT = VecVT;
4493   if (VecVT.isFixedLengthVector()) {
4494     ContainerVT = getContainerForFixedLengthVector(VecVT);
4495     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4496   }
4497 
4498   // If the index is 0, the vector is already in the right position.
4499   if (!isNullConstant(Idx)) {
4500     // Use a VL of 1 to avoid processing more elements than we need.
4501     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4502     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4503     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4504                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4505   }
4506 
4507   if (!EltVT.isInteger()) {
4508     // Floating-point extracts are handled in TableGen.
4509     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4510                        DAG.getConstant(0, DL, XLenVT));
4511   }
4512 
4513   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4514   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4515 }
4516 
4517 // Some RVV intrinsics may claim that they want an integer operand to be
4518 // promoted or expanded.
4519 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4520                                            const RISCVSubtarget &Subtarget) {
4521   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4522           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4523          "Unexpected opcode");
4524 
4525   if (!Subtarget.hasVInstructions())
4526     return SDValue();
4527 
4528   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4529   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4530   SDLoc DL(Op);
4531 
4532   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4533       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4534   if (!II || !II->hasScalarOperand())
4535     return SDValue();
4536 
4537   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4538   assert(SplatOp < Op.getNumOperands());
4539 
4540   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4541   SDValue &ScalarOp = Operands[SplatOp];
4542   MVT OpVT = ScalarOp.getSimpleValueType();
4543   MVT XLenVT = Subtarget.getXLenVT();
4544 
4545   // If this isn't a scalar, or its type is XLenVT we're done.
4546   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4547     return SDValue();
4548 
4549   // Simplest case is that the operand needs to be promoted to XLenVT.
4550   if (OpVT.bitsLT(XLenVT)) {
4551     // If the operand is a constant, sign extend to increase our chances
4552     // of being able to use a .vi instruction. ANY_EXTEND would become a
4553     // a zero extend and the simm5 check in isel would fail.
4554     // FIXME: Should we ignore the upper bits in isel instead?
4555     unsigned ExtOpc =
4556         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4557     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4558     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4559   }
4560 
4561   // Use the previous operand to get the vXi64 VT. The result might be a mask
4562   // VT for compares. Using the previous operand assumes that the previous
4563   // operand will never have a smaller element size than a scalar operand and
4564   // that a widening operation never uses SEW=64.
4565   // NOTE: If this fails the below assert, we can probably just find the
4566   // element count from any operand or result and use it to construct the VT.
4567   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4568   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4569 
4570   // The more complex case is when the scalar is larger than XLenVT.
4571   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4572          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4573 
4574   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4575   // instruction to sign-extend since SEW>XLEN.
4576   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4577     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4578     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4579   }
4580 
4581   switch (IntNo) {
4582   case Intrinsic::riscv_vslide1up:
4583   case Intrinsic::riscv_vslide1down:
4584   case Intrinsic::riscv_vslide1up_mask:
4585   case Intrinsic::riscv_vslide1down_mask: {
4586     // We need to special case these when the scalar is larger than XLen.
4587     unsigned NumOps = Op.getNumOperands();
4588     bool IsMasked = NumOps == 7;
4589 
4590     // Convert the vector source to the equivalent nxvXi32 vector.
4591     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4592     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4593 
4594     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4595                                    DAG.getConstant(0, DL, XLenVT));
4596     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4597                                    DAG.getConstant(1, DL, XLenVT));
4598 
4599     // Double the VL since we halved SEW.
4600     SDValue AVL = getVLOperand(Op);
4601     SDValue I32VL;
4602 
4603     // Optimize for constant AVL
4604     if (isa<ConstantSDNode>(AVL)) {
4605       unsigned EltSize = VT.getScalarSizeInBits();
4606       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4607 
4608       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4609       unsigned MaxVLMAX =
4610           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4611 
4612       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4613       unsigned MinVLMAX =
4614           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4615 
4616       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4617       if (AVLInt <= MinVLMAX) {
4618         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4619       } else if (AVLInt >= 2 * MaxVLMAX) {
4620         // Just set vl to VLMAX in this situation
4621         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4622         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4623         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4624         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4625         SDValue SETVLMAX = DAG.getTargetConstant(
4626             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4627         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4628                             LMUL);
4629       } else {
4630         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4631         // is related to the hardware implementation.
4632         // So let the following code handle
4633       }
4634     }
4635     if (!I32VL) {
4636       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4637       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4638       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4639       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4640       SDValue SETVL =
4641           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4642       // Using vsetvli instruction to get actually used length which related to
4643       // the hardware implementation
4644       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4645                                SEW, LMUL);
4646       I32VL =
4647           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4648     }
4649 
4650     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4651 
4652     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4653     // instructions.
4654     SDValue Passthru;
4655     if (IsMasked)
4656       Passthru = DAG.getUNDEF(I32VT);
4657     else
4658       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4659 
4660     if (IntNo == Intrinsic::riscv_vslide1up ||
4661         IntNo == Intrinsic::riscv_vslide1up_mask) {
4662       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4663                         ScalarHi, I32Mask, I32VL);
4664       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4665                         ScalarLo, I32Mask, I32VL);
4666     } else {
4667       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4668                         ScalarLo, I32Mask, I32VL);
4669       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4670                         ScalarHi, I32Mask, I32VL);
4671     }
4672 
4673     // Convert back to nxvXi64.
4674     Vec = DAG.getBitcast(VT, Vec);
4675 
4676     if (!IsMasked)
4677       return Vec;
4678     // Apply mask after the operation.
4679     SDValue Mask = Operands[NumOps - 3];
4680     SDValue MaskedOff = Operands[1];
4681     // Assume Policy operand is the last operand.
4682     uint64_t Policy =
4683         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4684     // We don't need to select maskedoff if it's undef.
4685     if (MaskedOff.isUndef())
4686       return Vec;
4687     // TAMU
4688     if (Policy == RISCVII::TAIL_AGNOSTIC)
4689       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4690                          AVL);
4691     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4692     // It's fine because vmerge does not care mask policy.
4693     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4694                        AVL);
4695   }
4696   }
4697 
4698   // We need to convert the scalar to a splat vector.
4699   SDValue VL = getVLOperand(Op);
4700   assert(VL.getValueType() == XLenVT);
4701   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4702   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4703 }
4704 
4705 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4706                                                      SelectionDAG &DAG) const {
4707   unsigned IntNo = Op.getConstantOperandVal(0);
4708   SDLoc DL(Op);
4709   MVT XLenVT = Subtarget.getXLenVT();
4710 
4711   switch (IntNo) {
4712   default:
4713     break; // Don't custom lower most intrinsics.
4714   case Intrinsic::thread_pointer: {
4715     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4716     return DAG.getRegister(RISCV::X4, PtrVT);
4717   }
4718   case Intrinsic::riscv_orc_b:
4719   case Intrinsic::riscv_brev8: {
4720     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4721     unsigned Opc =
4722         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4723     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4724                        DAG.getConstant(7, DL, XLenVT));
4725   }
4726   case Intrinsic::riscv_grev:
4727   case Intrinsic::riscv_gorc: {
4728     unsigned Opc =
4729         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4730     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4731   }
4732   case Intrinsic::riscv_zip:
4733   case Intrinsic::riscv_unzip: {
4734     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4735     // For i32 the immediate is 15. For i64 the immediate is 31.
4736     unsigned Opc =
4737         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4738     unsigned BitWidth = Op.getValueSizeInBits();
4739     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4740     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4741                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4742   }
4743   case Intrinsic::riscv_shfl:
4744   case Intrinsic::riscv_unshfl: {
4745     unsigned Opc =
4746         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4747     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4748   }
4749   case Intrinsic::riscv_bcompress:
4750   case Intrinsic::riscv_bdecompress: {
4751     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4752                                                        : RISCVISD::BDECOMPRESS;
4753     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4754   }
4755   case Intrinsic::riscv_bfp:
4756     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4757                        Op.getOperand(2));
4758   case Intrinsic::riscv_fsl:
4759     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4760                        Op.getOperand(2), Op.getOperand(3));
4761   case Intrinsic::riscv_fsr:
4762     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4763                        Op.getOperand(2), Op.getOperand(3));
4764   case Intrinsic::riscv_vmv_x_s:
4765     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4766     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4767                        Op.getOperand(1));
4768   case Intrinsic::riscv_vmv_v_x:
4769     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4770                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4771                             Subtarget);
4772   case Intrinsic::riscv_vfmv_v_f:
4773     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4774                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4775   case Intrinsic::riscv_vmv_s_x: {
4776     SDValue Scalar = Op.getOperand(2);
4777 
4778     if (Scalar.getValueType().bitsLE(XLenVT)) {
4779       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4780       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4781                          Op.getOperand(1), Scalar, Op.getOperand(3));
4782     }
4783 
4784     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4785 
4786     // This is an i64 value that lives in two scalar registers. We have to
4787     // insert this in a convoluted way. First we build vXi64 splat containing
4788     // the two values that we assemble using some bit math. Next we'll use
4789     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4790     // to merge element 0 from our splat into the source vector.
4791     // FIXME: This is probably not the best way to do this, but it is
4792     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4793     // point.
4794     //   sw lo, (a0)
4795     //   sw hi, 4(a0)
4796     //   vlse vX, (a0)
4797     //
4798     //   vid.v      vVid
4799     //   vmseq.vx   mMask, vVid, 0
4800     //   vmerge.vvm vDest, vSrc, vVal, mMask
4801     MVT VT = Op.getSimpleValueType();
4802     SDValue Vec = Op.getOperand(1);
4803     SDValue VL = getVLOperand(Op);
4804 
4805     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4806     if (Op.getOperand(1).isUndef())
4807       return SplattedVal;
4808     SDValue SplattedIdx =
4809         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4810                     DAG.getConstant(0, DL, MVT::i32), VL);
4811 
4812     MVT MaskVT = getMaskTypeFor(VT);
4813     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4814     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4815     SDValue SelectCond =
4816         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4817                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4818     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4819                        Vec, VL);
4820   }
4821   }
4822 
4823   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4824 }
4825 
4826 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4827                                                     SelectionDAG &DAG) const {
4828   unsigned IntNo = Op.getConstantOperandVal(1);
4829   switch (IntNo) {
4830   default:
4831     break;
4832   case Intrinsic::riscv_masked_strided_load: {
4833     SDLoc DL(Op);
4834     MVT XLenVT = Subtarget.getXLenVT();
4835 
4836     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4837     // the selection of the masked intrinsics doesn't do this for us.
4838     SDValue Mask = Op.getOperand(5);
4839     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4840 
4841     MVT VT = Op->getSimpleValueType(0);
4842     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4843 
4844     SDValue PassThru = Op.getOperand(2);
4845     if (!IsUnmasked) {
4846       MVT MaskVT = getMaskTypeFor(ContainerVT);
4847       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4848       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4849     }
4850 
4851     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4852 
4853     SDValue IntID = DAG.getTargetConstant(
4854         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4855         XLenVT);
4856 
4857     auto *Load = cast<MemIntrinsicSDNode>(Op);
4858     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4859     if (IsUnmasked)
4860       Ops.push_back(DAG.getUNDEF(ContainerVT));
4861     else
4862       Ops.push_back(PassThru);
4863     Ops.push_back(Op.getOperand(3)); // Ptr
4864     Ops.push_back(Op.getOperand(4)); // Stride
4865     if (!IsUnmasked)
4866       Ops.push_back(Mask);
4867     Ops.push_back(VL);
4868     if (!IsUnmasked) {
4869       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4870       Ops.push_back(Policy);
4871     }
4872 
4873     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4874     SDValue Result =
4875         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4876                                 Load->getMemoryVT(), Load->getMemOperand());
4877     SDValue Chain = Result.getValue(1);
4878     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4879     return DAG.getMergeValues({Result, Chain}, DL);
4880   }
4881   case Intrinsic::riscv_seg2_load:
4882   case Intrinsic::riscv_seg3_load:
4883   case Intrinsic::riscv_seg4_load:
4884   case Intrinsic::riscv_seg5_load:
4885   case Intrinsic::riscv_seg6_load:
4886   case Intrinsic::riscv_seg7_load:
4887   case Intrinsic::riscv_seg8_load: {
4888     SDLoc DL(Op);
4889     static const Intrinsic::ID VlsegInts[7] = {
4890         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4891         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4892         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4893         Intrinsic::riscv_vlseg8};
4894     unsigned NF = Op->getNumValues() - 1;
4895     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4896     MVT XLenVT = Subtarget.getXLenVT();
4897     MVT VT = Op->getSimpleValueType(0);
4898     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4899 
4900     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4901     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4902     auto *Load = cast<MemIntrinsicSDNode>(Op);
4903     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4904     ContainerVTs.push_back(MVT::Other);
4905     SDVTList VTs = DAG.getVTList(ContainerVTs);
4906     SDValue Result =
4907         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4908                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4909                                 Load->getMemoryVT(), Load->getMemOperand());
4910     SmallVector<SDValue, 9> Results;
4911     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4912       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4913                                                   DAG, Subtarget));
4914     Results.push_back(Result.getValue(NF));
4915     return DAG.getMergeValues(Results, DL);
4916   }
4917   }
4918 
4919   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4920 }
4921 
4922 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4923                                                  SelectionDAG &DAG) const {
4924   unsigned IntNo = Op.getConstantOperandVal(1);
4925   switch (IntNo) {
4926   default:
4927     break;
4928   case Intrinsic::riscv_masked_strided_store: {
4929     SDLoc DL(Op);
4930     MVT XLenVT = Subtarget.getXLenVT();
4931 
4932     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4933     // the selection of the masked intrinsics doesn't do this for us.
4934     SDValue Mask = Op.getOperand(5);
4935     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4936 
4937     SDValue Val = Op.getOperand(2);
4938     MVT VT = Val.getSimpleValueType();
4939     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4940 
4941     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4942     if (!IsUnmasked) {
4943       MVT MaskVT = getMaskTypeFor(ContainerVT);
4944       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4945     }
4946 
4947     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4948 
4949     SDValue IntID = DAG.getTargetConstant(
4950         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4951         XLenVT);
4952 
4953     auto *Store = cast<MemIntrinsicSDNode>(Op);
4954     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4955     Ops.push_back(Val);
4956     Ops.push_back(Op.getOperand(3)); // Ptr
4957     Ops.push_back(Op.getOperand(4)); // Stride
4958     if (!IsUnmasked)
4959       Ops.push_back(Mask);
4960     Ops.push_back(VL);
4961 
4962     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4963                                    Ops, Store->getMemoryVT(),
4964                                    Store->getMemOperand());
4965   }
4966   }
4967 
4968   return SDValue();
4969 }
4970 
4971 static MVT getLMUL1VT(MVT VT) {
4972   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4973          "Unexpected vector MVT");
4974   return MVT::getScalableVectorVT(
4975       VT.getVectorElementType(),
4976       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4977 }
4978 
4979 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4980   switch (ISDOpcode) {
4981   default:
4982     llvm_unreachable("Unhandled reduction");
4983   case ISD::VECREDUCE_ADD:
4984     return RISCVISD::VECREDUCE_ADD_VL;
4985   case ISD::VECREDUCE_UMAX:
4986     return RISCVISD::VECREDUCE_UMAX_VL;
4987   case ISD::VECREDUCE_SMAX:
4988     return RISCVISD::VECREDUCE_SMAX_VL;
4989   case ISD::VECREDUCE_UMIN:
4990     return RISCVISD::VECREDUCE_UMIN_VL;
4991   case ISD::VECREDUCE_SMIN:
4992     return RISCVISD::VECREDUCE_SMIN_VL;
4993   case ISD::VECREDUCE_AND:
4994     return RISCVISD::VECREDUCE_AND_VL;
4995   case ISD::VECREDUCE_OR:
4996     return RISCVISD::VECREDUCE_OR_VL;
4997   case ISD::VECREDUCE_XOR:
4998     return RISCVISD::VECREDUCE_XOR_VL;
4999   }
5000 }
5001 
5002 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5003                                                          SelectionDAG &DAG,
5004                                                          bool IsVP) const {
5005   SDLoc DL(Op);
5006   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5007   MVT VecVT = Vec.getSimpleValueType();
5008   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5009           Op.getOpcode() == ISD::VECREDUCE_OR ||
5010           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5011           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5012           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5013           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5014          "Unexpected reduction lowering");
5015 
5016   MVT XLenVT = Subtarget.getXLenVT();
5017   assert(Op.getValueType() == XLenVT &&
5018          "Expected reduction output to be legalized to XLenVT");
5019 
5020   MVT ContainerVT = VecVT;
5021   if (VecVT.isFixedLengthVector()) {
5022     ContainerVT = getContainerForFixedLengthVector(VecVT);
5023     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5024   }
5025 
5026   SDValue Mask, VL;
5027   if (IsVP) {
5028     Mask = Op.getOperand(2);
5029     VL = Op.getOperand(3);
5030   } else {
5031     std::tie(Mask, VL) =
5032         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5033   }
5034 
5035   unsigned BaseOpc;
5036   ISD::CondCode CC;
5037   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5038 
5039   switch (Op.getOpcode()) {
5040   default:
5041     llvm_unreachable("Unhandled reduction");
5042   case ISD::VECREDUCE_AND:
5043   case ISD::VP_REDUCE_AND: {
5044     // vcpop ~x == 0
5045     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5046     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5047     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5048     CC = ISD::SETEQ;
5049     BaseOpc = ISD::AND;
5050     break;
5051   }
5052   case ISD::VECREDUCE_OR:
5053   case ISD::VP_REDUCE_OR:
5054     // vcpop x != 0
5055     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5056     CC = ISD::SETNE;
5057     BaseOpc = ISD::OR;
5058     break;
5059   case ISD::VECREDUCE_XOR:
5060   case ISD::VP_REDUCE_XOR: {
5061     // ((vcpop x) & 1) != 0
5062     SDValue One = DAG.getConstant(1, DL, XLenVT);
5063     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5064     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5065     CC = ISD::SETNE;
5066     BaseOpc = ISD::XOR;
5067     break;
5068   }
5069   }
5070 
5071   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5072 
5073   if (!IsVP)
5074     return SetCC;
5075 
5076   // Now include the start value in the operation.
5077   // Note that we must return the start value when no elements are operated
5078   // upon. The vcpop instructions we've emitted in each case above will return
5079   // 0 for an inactive vector, and so we've already received the neutral value:
5080   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5081   // can simply include the start value.
5082   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5083 }
5084 
5085 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5086                                             SelectionDAG &DAG) const {
5087   SDLoc DL(Op);
5088   SDValue Vec = Op.getOperand(0);
5089   EVT VecEVT = Vec.getValueType();
5090 
5091   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5092 
5093   // Due to ordering in legalize types we may have a vector type that needs to
5094   // be split. Do that manually so we can get down to a legal type.
5095   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5096          TargetLowering::TypeSplitVector) {
5097     SDValue Lo, Hi;
5098     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5099     VecEVT = Lo.getValueType();
5100     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5101   }
5102 
5103   // TODO: The type may need to be widened rather than split. Or widened before
5104   // it can be split.
5105   if (!isTypeLegal(VecEVT))
5106     return SDValue();
5107 
5108   MVT VecVT = VecEVT.getSimpleVT();
5109   MVT VecEltVT = VecVT.getVectorElementType();
5110   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5111 
5112   MVT ContainerVT = VecVT;
5113   if (VecVT.isFixedLengthVector()) {
5114     ContainerVT = getContainerForFixedLengthVector(VecVT);
5115     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5116   }
5117 
5118   MVT M1VT = getLMUL1VT(ContainerVT);
5119   MVT XLenVT = Subtarget.getXLenVT();
5120 
5121   SDValue Mask, VL;
5122   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5123 
5124   SDValue NeutralElem =
5125       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5126   SDValue IdentitySplat =
5127       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5128                        M1VT, DL, DAG, Subtarget);
5129   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5130                                   IdentitySplat, Mask, VL);
5131   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5132                              DAG.getConstant(0, DL, XLenVT));
5133   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5134 }
5135 
5136 // Given a reduction op, this function returns the matching reduction opcode,
5137 // the vector SDValue and the scalar SDValue required to lower this to a
5138 // RISCVISD node.
5139 static std::tuple<unsigned, SDValue, SDValue>
5140 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5141   SDLoc DL(Op);
5142   auto Flags = Op->getFlags();
5143   unsigned Opcode = Op.getOpcode();
5144   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5145   switch (Opcode) {
5146   default:
5147     llvm_unreachable("Unhandled reduction");
5148   case ISD::VECREDUCE_FADD: {
5149     // Use positive zero if we can. It is cheaper to materialize.
5150     SDValue Zero =
5151         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5152     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5153   }
5154   case ISD::VECREDUCE_SEQ_FADD:
5155     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5156                            Op.getOperand(0));
5157   case ISD::VECREDUCE_FMIN:
5158     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5159                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5160   case ISD::VECREDUCE_FMAX:
5161     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5162                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5163   }
5164 }
5165 
5166 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5167                                               SelectionDAG &DAG) const {
5168   SDLoc DL(Op);
5169   MVT VecEltVT = Op.getSimpleValueType();
5170 
5171   unsigned RVVOpcode;
5172   SDValue VectorVal, ScalarVal;
5173   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5174       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5175   MVT VecVT = VectorVal.getSimpleValueType();
5176 
5177   MVT ContainerVT = VecVT;
5178   if (VecVT.isFixedLengthVector()) {
5179     ContainerVT = getContainerForFixedLengthVector(VecVT);
5180     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5181   }
5182 
5183   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5184   MVT XLenVT = Subtarget.getXLenVT();
5185 
5186   SDValue Mask, VL;
5187   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5188 
5189   SDValue ScalarSplat =
5190       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5191                        M1VT, DL, DAG, Subtarget);
5192   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5193                                   VectorVal, ScalarSplat, Mask, VL);
5194   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5195                      DAG.getConstant(0, DL, XLenVT));
5196 }
5197 
5198 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5199   switch (ISDOpcode) {
5200   default:
5201     llvm_unreachable("Unhandled reduction");
5202   case ISD::VP_REDUCE_ADD:
5203     return RISCVISD::VECREDUCE_ADD_VL;
5204   case ISD::VP_REDUCE_UMAX:
5205     return RISCVISD::VECREDUCE_UMAX_VL;
5206   case ISD::VP_REDUCE_SMAX:
5207     return RISCVISD::VECREDUCE_SMAX_VL;
5208   case ISD::VP_REDUCE_UMIN:
5209     return RISCVISD::VECREDUCE_UMIN_VL;
5210   case ISD::VP_REDUCE_SMIN:
5211     return RISCVISD::VECREDUCE_SMIN_VL;
5212   case ISD::VP_REDUCE_AND:
5213     return RISCVISD::VECREDUCE_AND_VL;
5214   case ISD::VP_REDUCE_OR:
5215     return RISCVISD::VECREDUCE_OR_VL;
5216   case ISD::VP_REDUCE_XOR:
5217     return RISCVISD::VECREDUCE_XOR_VL;
5218   case ISD::VP_REDUCE_FADD:
5219     return RISCVISD::VECREDUCE_FADD_VL;
5220   case ISD::VP_REDUCE_SEQ_FADD:
5221     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5222   case ISD::VP_REDUCE_FMAX:
5223     return RISCVISD::VECREDUCE_FMAX_VL;
5224   case ISD::VP_REDUCE_FMIN:
5225     return RISCVISD::VECREDUCE_FMIN_VL;
5226   }
5227 }
5228 
5229 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5230                                            SelectionDAG &DAG) const {
5231   SDLoc DL(Op);
5232   SDValue Vec = Op.getOperand(1);
5233   EVT VecEVT = Vec.getValueType();
5234 
5235   // TODO: The type may need to be widened rather than split. Or widened before
5236   // it can be split.
5237   if (!isTypeLegal(VecEVT))
5238     return SDValue();
5239 
5240   MVT VecVT = VecEVT.getSimpleVT();
5241   MVT VecEltVT = VecVT.getVectorElementType();
5242   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5243 
5244   MVT ContainerVT = VecVT;
5245   if (VecVT.isFixedLengthVector()) {
5246     ContainerVT = getContainerForFixedLengthVector(VecVT);
5247     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5248   }
5249 
5250   SDValue VL = Op.getOperand(3);
5251   SDValue Mask = Op.getOperand(2);
5252 
5253   MVT M1VT = getLMUL1VT(ContainerVT);
5254   MVT XLenVT = Subtarget.getXLenVT();
5255   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5256 
5257   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5258                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5259                                         DL, DAG, Subtarget);
5260   SDValue Reduction =
5261       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5262   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5263                              DAG.getConstant(0, DL, XLenVT));
5264   if (!VecVT.isInteger())
5265     return Elt0;
5266   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5267 }
5268 
5269 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5270                                                    SelectionDAG &DAG) const {
5271   SDValue Vec = Op.getOperand(0);
5272   SDValue SubVec = Op.getOperand(1);
5273   MVT VecVT = Vec.getSimpleValueType();
5274   MVT SubVecVT = SubVec.getSimpleValueType();
5275 
5276   SDLoc DL(Op);
5277   MVT XLenVT = Subtarget.getXLenVT();
5278   unsigned OrigIdx = Op.getConstantOperandVal(2);
5279   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5280 
5281   // We don't have the ability to slide mask vectors up indexed by their i1
5282   // elements; the smallest we can do is i8. Often we are able to bitcast to
5283   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5284   // into a scalable one, we might not necessarily have enough scalable
5285   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5286   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5287       (OrigIdx != 0 || !Vec.isUndef())) {
5288     if (VecVT.getVectorMinNumElements() >= 8 &&
5289         SubVecVT.getVectorMinNumElements() >= 8) {
5290       assert(OrigIdx % 8 == 0 && "Invalid index");
5291       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5292              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5293              "Unexpected mask vector lowering");
5294       OrigIdx /= 8;
5295       SubVecVT =
5296           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5297                            SubVecVT.isScalableVector());
5298       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5299                                VecVT.isScalableVector());
5300       Vec = DAG.getBitcast(VecVT, Vec);
5301       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5302     } else {
5303       // We can't slide this mask vector up indexed by its i1 elements.
5304       // This poses a problem when we wish to insert a scalable vector which
5305       // can't be re-expressed as a larger type. Just choose the slow path and
5306       // extend to a larger type, then truncate back down.
5307       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5308       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5309       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5310       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5311       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5312                         Op.getOperand(2));
5313       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5314       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5315     }
5316   }
5317 
5318   // If the subvector vector is a fixed-length type, we cannot use subregister
5319   // manipulation to simplify the codegen; we don't know which register of a
5320   // LMUL group contains the specific subvector as we only know the minimum
5321   // register size. Therefore we must slide the vector group up the full
5322   // amount.
5323   if (SubVecVT.isFixedLengthVector()) {
5324     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5325       return Op;
5326     MVT ContainerVT = VecVT;
5327     if (VecVT.isFixedLengthVector()) {
5328       ContainerVT = getContainerForFixedLengthVector(VecVT);
5329       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5330     }
5331     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5332                          DAG.getUNDEF(ContainerVT), SubVec,
5333                          DAG.getConstant(0, DL, XLenVT));
5334     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5335       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5336       return DAG.getBitcast(Op.getValueType(), SubVec);
5337     }
5338     SDValue Mask =
5339         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5340     // Set the vector length to only the number of elements we care about. Note
5341     // that for slideup this includes the offset.
5342     SDValue VL =
5343         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5344     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5345     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5346                                   SubVec, SlideupAmt, Mask, VL);
5347     if (VecVT.isFixedLengthVector())
5348       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5349     return DAG.getBitcast(Op.getValueType(), Slideup);
5350   }
5351 
5352   unsigned SubRegIdx, RemIdx;
5353   std::tie(SubRegIdx, RemIdx) =
5354       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5355           VecVT, SubVecVT, OrigIdx, TRI);
5356 
5357   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5358   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5359                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5360                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5361 
5362   // 1. If the Idx has been completely eliminated and this subvector's size is
5363   // a vector register or a multiple thereof, or the surrounding elements are
5364   // undef, then this is a subvector insert which naturally aligns to a vector
5365   // register. These can easily be handled using subregister manipulation.
5366   // 2. If the subvector is smaller than a vector register, then the insertion
5367   // must preserve the undisturbed elements of the register. We do this by
5368   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5369   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5370   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5371   // LMUL=1 type back into the larger vector (resolving to another subregister
5372   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5373   // to avoid allocating a large register group to hold our subvector.
5374   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5375     return Op;
5376 
5377   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5378   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5379   // (in our case undisturbed). This means we can set up a subvector insertion
5380   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5381   // size of the subvector.
5382   MVT InterSubVT = VecVT;
5383   SDValue AlignedExtract = Vec;
5384   unsigned AlignedIdx = OrigIdx - RemIdx;
5385   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5386     InterSubVT = getLMUL1VT(VecVT);
5387     // Extract a subvector equal to the nearest full vector register type. This
5388     // should resolve to a EXTRACT_SUBREG instruction.
5389     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5390                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5391   }
5392 
5393   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5394   // For scalable vectors this must be further multiplied by vscale.
5395   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5396 
5397   SDValue Mask, VL;
5398   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5399 
5400   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5401   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5402   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5403   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5404 
5405   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5406                        DAG.getUNDEF(InterSubVT), SubVec,
5407                        DAG.getConstant(0, DL, XLenVT));
5408 
5409   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5410                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5411 
5412   // If required, insert this subvector back into the correct vector register.
5413   // This should resolve to an INSERT_SUBREG instruction.
5414   if (VecVT.bitsGT(InterSubVT))
5415     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5416                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5417 
5418   // We might have bitcast from a mask type: cast back to the original type if
5419   // required.
5420   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5421 }
5422 
5423 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5424                                                     SelectionDAG &DAG) const {
5425   SDValue Vec = Op.getOperand(0);
5426   MVT SubVecVT = Op.getSimpleValueType();
5427   MVT VecVT = Vec.getSimpleValueType();
5428 
5429   SDLoc DL(Op);
5430   MVT XLenVT = Subtarget.getXLenVT();
5431   unsigned OrigIdx = Op.getConstantOperandVal(1);
5432   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5433 
5434   // We don't have the ability to slide mask vectors down indexed by their i1
5435   // elements; the smallest we can do is i8. Often we are able to bitcast to
5436   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5437   // from a scalable one, we might not necessarily have enough scalable
5438   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5439   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5440     if (VecVT.getVectorMinNumElements() >= 8 &&
5441         SubVecVT.getVectorMinNumElements() >= 8) {
5442       assert(OrigIdx % 8 == 0 && "Invalid index");
5443       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5444              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5445              "Unexpected mask vector lowering");
5446       OrigIdx /= 8;
5447       SubVecVT =
5448           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5449                            SubVecVT.isScalableVector());
5450       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5451                                VecVT.isScalableVector());
5452       Vec = DAG.getBitcast(VecVT, Vec);
5453     } else {
5454       // We can't slide this mask vector down, indexed by its i1 elements.
5455       // This poses a problem when we wish to extract a scalable vector which
5456       // can't be re-expressed as a larger type. Just choose the slow path and
5457       // extend to a larger type, then truncate back down.
5458       // TODO: We could probably improve this when extracting certain fixed
5459       // from fixed, where we can extract as i8 and shift the correct element
5460       // right to reach the desired subvector?
5461       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5462       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5463       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5464       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5465                         Op.getOperand(1));
5466       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5467       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5468     }
5469   }
5470 
5471   // If the subvector vector is a fixed-length type, we cannot use subregister
5472   // manipulation to simplify the codegen; we don't know which register of a
5473   // LMUL group contains the specific subvector as we only know the minimum
5474   // register size. Therefore we must slide the vector group down the full
5475   // amount.
5476   if (SubVecVT.isFixedLengthVector()) {
5477     // With an index of 0 this is a cast-like subvector, which can be performed
5478     // with subregister operations.
5479     if (OrigIdx == 0)
5480       return Op;
5481     MVT ContainerVT = VecVT;
5482     if (VecVT.isFixedLengthVector()) {
5483       ContainerVT = getContainerForFixedLengthVector(VecVT);
5484       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5485     }
5486     SDValue Mask =
5487         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5488     // Set the vector length to only the number of elements we care about. This
5489     // avoids sliding down elements we're going to discard straight away.
5490     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5491     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5492     SDValue Slidedown =
5493         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5494                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5495     // Now we can use a cast-like subvector extract to get the result.
5496     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5497                             DAG.getConstant(0, DL, XLenVT));
5498     return DAG.getBitcast(Op.getValueType(), Slidedown);
5499   }
5500 
5501   unsigned SubRegIdx, RemIdx;
5502   std::tie(SubRegIdx, RemIdx) =
5503       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5504           VecVT, SubVecVT, OrigIdx, TRI);
5505 
5506   // If the Idx has been completely eliminated then this is a subvector extract
5507   // which naturally aligns to a vector register. These can easily be handled
5508   // using subregister manipulation.
5509   if (RemIdx == 0)
5510     return Op;
5511 
5512   // Else we must shift our vector register directly to extract the subvector.
5513   // Do this using VSLIDEDOWN.
5514 
5515   // If the vector type is an LMUL-group type, extract a subvector equal to the
5516   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5517   // instruction.
5518   MVT InterSubVT = VecVT;
5519   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5520     InterSubVT = getLMUL1VT(VecVT);
5521     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5522                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5523   }
5524 
5525   // Slide this vector register down by the desired number of elements in order
5526   // to place the desired subvector starting at element 0.
5527   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5528   // For scalable vectors this must be further multiplied by vscale.
5529   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5530 
5531   SDValue Mask, VL;
5532   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5533   SDValue Slidedown =
5534       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5535                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5536 
5537   // Now the vector is in the right position, extract our final subvector. This
5538   // should resolve to a COPY.
5539   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5540                           DAG.getConstant(0, DL, XLenVT));
5541 
5542   // We might have bitcast from a mask type: cast back to the original type if
5543   // required.
5544   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5545 }
5546 
5547 // Lower step_vector to the vid instruction. Any non-identity step value must
5548 // be accounted for my manual expansion.
5549 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5550                                               SelectionDAG &DAG) const {
5551   SDLoc DL(Op);
5552   MVT VT = Op.getSimpleValueType();
5553   MVT XLenVT = Subtarget.getXLenVT();
5554   SDValue Mask, VL;
5555   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5556   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5557   uint64_t StepValImm = Op.getConstantOperandVal(0);
5558   if (StepValImm != 1) {
5559     if (isPowerOf2_64(StepValImm)) {
5560       SDValue StepVal =
5561           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5562                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5563       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5564     } else {
5565       SDValue StepVal = lowerScalarSplat(
5566           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5567           VL, VT, DL, DAG, Subtarget);
5568       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5569     }
5570   }
5571   return StepVec;
5572 }
5573 
5574 // Implement vector_reverse using vrgather.vv with indices determined by
5575 // subtracting the id of each element from (VLMAX-1). This will convert
5576 // the indices like so:
5577 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5578 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5579 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5580                                                  SelectionDAG &DAG) const {
5581   SDLoc DL(Op);
5582   MVT VecVT = Op.getSimpleValueType();
5583   unsigned EltSize = VecVT.getScalarSizeInBits();
5584   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5585 
5586   unsigned MaxVLMAX = 0;
5587   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5588   if (VectorBitsMax != 0)
5589     MaxVLMAX =
5590         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5591 
5592   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5593   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5594 
5595   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5596   // to use vrgatherei16.vv.
5597   // TODO: It's also possible to use vrgatherei16.vv for other types to
5598   // decrease register width for the index calculation.
5599   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5600     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5601     // Reverse each half, then reassemble them in reverse order.
5602     // NOTE: It's also possible that after splitting that VLMAX no longer
5603     // requires vrgatherei16.vv.
5604     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5605       SDValue Lo, Hi;
5606       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5607       EVT LoVT, HiVT;
5608       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5609       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5610       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5611       // Reassemble the low and high pieces reversed.
5612       // FIXME: This is a CONCAT_VECTORS.
5613       SDValue Res =
5614           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5615                       DAG.getIntPtrConstant(0, DL));
5616       return DAG.getNode(
5617           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5618           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5619     }
5620 
5621     // Just promote the int type to i16 which will double the LMUL.
5622     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5623     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5624   }
5625 
5626   MVT XLenVT = Subtarget.getXLenVT();
5627   SDValue Mask, VL;
5628   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5629 
5630   // Calculate VLMAX-1 for the desired SEW.
5631   unsigned MinElts = VecVT.getVectorMinNumElements();
5632   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5633                               DAG.getConstant(MinElts, DL, XLenVT));
5634   SDValue VLMinus1 =
5635       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5636 
5637   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5638   bool IsRV32E64 =
5639       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5640   SDValue SplatVL;
5641   if (!IsRV32E64)
5642     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5643   else
5644     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5645                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5646 
5647   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5648   SDValue Indices =
5649       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5650 
5651   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5652 }
5653 
5654 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5655                                                 SelectionDAG &DAG) const {
5656   SDLoc DL(Op);
5657   SDValue V1 = Op.getOperand(0);
5658   SDValue V2 = Op.getOperand(1);
5659   MVT XLenVT = Subtarget.getXLenVT();
5660   MVT VecVT = Op.getSimpleValueType();
5661 
5662   unsigned MinElts = VecVT.getVectorMinNumElements();
5663   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5664                               DAG.getConstant(MinElts, DL, XLenVT));
5665 
5666   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5667   SDValue DownOffset, UpOffset;
5668   if (ImmValue >= 0) {
5669     // The operand is a TargetConstant, we need to rebuild it as a regular
5670     // constant.
5671     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5672     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5673   } else {
5674     // The operand is a TargetConstant, we need to rebuild it as a regular
5675     // constant rather than negating the original operand.
5676     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5677     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5678   }
5679 
5680   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5681 
5682   SDValue SlideDown =
5683       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5684                   DownOffset, TrueMask, UpOffset);
5685   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5686                      TrueMask,
5687                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5688 }
5689 
5690 SDValue
5691 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5692                                                      SelectionDAG &DAG) const {
5693   SDLoc DL(Op);
5694   auto *Load = cast<LoadSDNode>(Op);
5695 
5696   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5697                                         Load->getMemoryVT(),
5698                                         *Load->getMemOperand()) &&
5699          "Expecting a correctly-aligned load");
5700 
5701   MVT VT = Op.getSimpleValueType();
5702   MVT XLenVT = Subtarget.getXLenVT();
5703   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5704 
5705   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5706 
5707   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5708   SDValue IntID = DAG.getTargetConstant(
5709       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5710   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5711   if (!IsMaskOp)
5712     Ops.push_back(DAG.getUNDEF(ContainerVT));
5713   Ops.push_back(Load->getBasePtr());
5714   Ops.push_back(VL);
5715   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5716   SDValue NewLoad =
5717       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5718                               Load->getMemoryVT(), Load->getMemOperand());
5719 
5720   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5721   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5722 }
5723 
5724 SDValue
5725 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5726                                                       SelectionDAG &DAG) const {
5727   SDLoc DL(Op);
5728   auto *Store = cast<StoreSDNode>(Op);
5729 
5730   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5731                                         Store->getMemoryVT(),
5732                                         *Store->getMemOperand()) &&
5733          "Expecting a correctly-aligned store");
5734 
5735   SDValue StoreVal = Store->getValue();
5736   MVT VT = StoreVal.getSimpleValueType();
5737   MVT XLenVT = Subtarget.getXLenVT();
5738 
5739   // If the size less than a byte, we need to pad with zeros to make a byte.
5740   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5741     VT = MVT::v8i1;
5742     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5743                            DAG.getConstant(0, DL, VT), StoreVal,
5744                            DAG.getIntPtrConstant(0, DL));
5745   }
5746 
5747   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5748 
5749   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5750 
5751   SDValue NewValue =
5752       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5753 
5754   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5755   SDValue IntID = DAG.getTargetConstant(
5756       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5757   return DAG.getMemIntrinsicNode(
5758       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5759       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5760       Store->getMemoryVT(), Store->getMemOperand());
5761 }
5762 
5763 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5764                                              SelectionDAG &DAG) const {
5765   SDLoc DL(Op);
5766   MVT VT = Op.getSimpleValueType();
5767 
5768   const auto *MemSD = cast<MemSDNode>(Op);
5769   EVT MemVT = MemSD->getMemoryVT();
5770   MachineMemOperand *MMO = MemSD->getMemOperand();
5771   SDValue Chain = MemSD->getChain();
5772   SDValue BasePtr = MemSD->getBasePtr();
5773 
5774   SDValue Mask, PassThru, VL;
5775   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5776     Mask = VPLoad->getMask();
5777     PassThru = DAG.getUNDEF(VT);
5778     VL = VPLoad->getVectorLength();
5779   } else {
5780     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5781     Mask = MLoad->getMask();
5782     PassThru = MLoad->getPassThru();
5783   }
5784 
5785   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5786 
5787   MVT XLenVT = Subtarget.getXLenVT();
5788 
5789   MVT ContainerVT = VT;
5790   if (VT.isFixedLengthVector()) {
5791     ContainerVT = getContainerForFixedLengthVector(VT);
5792     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5793     if (!IsUnmasked) {
5794       MVT MaskVT = getMaskTypeFor(ContainerVT);
5795       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5796     }
5797   }
5798 
5799   if (!VL)
5800     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5801 
5802   unsigned IntID =
5803       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5804   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5805   if (IsUnmasked)
5806     Ops.push_back(DAG.getUNDEF(ContainerVT));
5807   else
5808     Ops.push_back(PassThru);
5809   Ops.push_back(BasePtr);
5810   if (!IsUnmasked)
5811     Ops.push_back(Mask);
5812   Ops.push_back(VL);
5813   if (!IsUnmasked)
5814     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5815 
5816   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5817 
5818   SDValue Result =
5819       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5820   Chain = Result.getValue(1);
5821 
5822   if (VT.isFixedLengthVector())
5823     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5824 
5825   return DAG.getMergeValues({Result, Chain}, DL);
5826 }
5827 
5828 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5829                                               SelectionDAG &DAG) const {
5830   SDLoc DL(Op);
5831 
5832   const auto *MemSD = cast<MemSDNode>(Op);
5833   EVT MemVT = MemSD->getMemoryVT();
5834   MachineMemOperand *MMO = MemSD->getMemOperand();
5835   SDValue Chain = MemSD->getChain();
5836   SDValue BasePtr = MemSD->getBasePtr();
5837   SDValue Val, Mask, VL;
5838 
5839   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5840     Val = VPStore->getValue();
5841     Mask = VPStore->getMask();
5842     VL = VPStore->getVectorLength();
5843   } else {
5844     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5845     Val = MStore->getValue();
5846     Mask = MStore->getMask();
5847   }
5848 
5849   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5850 
5851   MVT VT = Val.getSimpleValueType();
5852   MVT XLenVT = Subtarget.getXLenVT();
5853 
5854   MVT ContainerVT = VT;
5855   if (VT.isFixedLengthVector()) {
5856     ContainerVT = getContainerForFixedLengthVector(VT);
5857 
5858     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5859     if (!IsUnmasked) {
5860       MVT MaskVT = getMaskTypeFor(ContainerVT);
5861       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5862     }
5863   }
5864 
5865   if (!VL)
5866     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5867 
5868   unsigned IntID =
5869       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5870   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5871   Ops.push_back(Val);
5872   Ops.push_back(BasePtr);
5873   if (!IsUnmasked)
5874     Ops.push_back(Mask);
5875   Ops.push_back(VL);
5876 
5877   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5878                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5879 }
5880 
5881 SDValue
5882 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5883                                                       SelectionDAG &DAG) const {
5884   MVT InVT = Op.getOperand(0).getSimpleValueType();
5885   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5886 
5887   MVT VT = Op.getSimpleValueType();
5888 
5889   SDValue Op1 =
5890       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5891   SDValue Op2 =
5892       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5893 
5894   SDLoc DL(Op);
5895   SDValue VL =
5896       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5897 
5898   MVT MaskVT = getMaskTypeFor(ContainerVT);
5899   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5900 
5901   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5902                             Op.getOperand(2), Mask, VL);
5903 
5904   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5905 }
5906 
5907 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5908     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5909   MVT VT = Op.getSimpleValueType();
5910 
5911   if (VT.getVectorElementType() == MVT::i1)
5912     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5913 
5914   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5915 }
5916 
5917 SDValue
5918 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5919                                                       SelectionDAG &DAG) const {
5920   unsigned Opc;
5921   switch (Op.getOpcode()) {
5922   default: llvm_unreachable("Unexpected opcode!");
5923   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5924   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5925   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5926   }
5927 
5928   return lowerToScalableOp(Op, DAG, Opc);
5929 }
5930 
5931 // Lower vector ABS to smax(X, sub(0, X)).
5932 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5933   SDLoc DL(Op);
5934   MVT VT = Op.getSimpleValueType();
5935   SDValue X = Op.getOperand(0);
5936 
5937   assert(VT.isFixedLengthVector() && "Unexpected type");
5938 
5939   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5940   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5941 
5942   SDValue Mask, VL;
5943   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5944 
5945   SDValue SplatZero = DAG.getNode(
5946       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5947       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5948   SDValue NegX =
5949       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5950   SDValue Max =
5951       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5952 
5953   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5954 }
5955 
5956 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5957     SDValue Op, SelectionDAG &DAG) const {
5958   SDLoc DL(Op);
5959   MVT VT = Op.getSimpleValueType();
5960   SDValue Mag = Op.getOperand(0);
5961   SDValue Sign = Op.getOperand(1);
5962   assert(Mag.getValueType() == Sign.getValueType() &&
5963          "Can only handle COPYSIGN with matching types.");
5964 
5965   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5966   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5967   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5968 
5969   SDValue Mask, VL;
5970   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5971 
5972   SDValue CopySign =
5973       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5974 
5975   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5976 }
5977 
5978 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5979     SDValue Op, SelectionDAG &DAG) const {
5980   MVT VT = Op.getSimpleValueType();
5981   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5982 
5983   MVT I1ContainerVT =
5984       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5985 
5986   SDValue CC =
5987       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5988   SDValue Op1 =
5989       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5990   SDValue Op2 =
5991       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5992 
5993   SDLoc DL(Op);
5994   SDValue Mask, VL;
5995   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5996 
5997   SDValue Select =
5998       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5999 
6000   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6001 }
6002 
6003 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6004                                                unsigned NewOpc,
6005                                                bool HasMask) const {
6006   MVT VT = Op.getSimpleValueType();
6007   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6008 
6009   // Create list of operands by converting existing ones to scalable types.
6010   SmallVector<SDValue, 6> Ops;
6011   for (const SDValue &V : Op->op_values()) {
6012     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6013 
6014     // Pass through non-vector operands.
6015     if (!V.getValueType().isVector()) {
6016       Ops.push_back(V);
6017       continue;
6018     }
6019 
6020     // "cast" fixed length vector to a scalable vector.
6021     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6022            "Only fixed length vectors are supported!");
6023     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6024   }
6025 
6026   SDLoc DL(Op);
6027   SDValue Mask, VL;
6028   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6029   if (HasMask)
6030     Ops.push_back(Mask);
6031   Ops.push_back(VL);
6032 
6033   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6034   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6035 }
6036 
6037 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6038 // * Operands of each node are assumed to be in the same order.
6039 // * The EVL operand is promoted from i32 to i64 on RV64.
6040 // * Fixed-length vectors are converted to their scalable-vector container
6041 //   types.
6042 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6043                                        unsigned RISCVISDOpc) const {
6044   SDLoc DL(Op);
6045   MVT VT = Op.getSimpleValueType();
6046   SmallVector<SDValue, 4> Ops;
6047 
6048   for (const auto &OpIdx : enumerate(Op->ops())) {
6049     SDValue V = OpIdx.value();
6050     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6051     // Pass through operands which aren't fixed-length vectors.
6052     if (!V.getValueType().isFixedLengthVector()) {
6053       Ops.push_back(V);
6054       continue;
6055     }
6056     // "cast" fixed length vector to a scalable vector.
6057     MVT OpVT = V.getSimpleValueType();
6058     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6059     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6060            "Only fixed length vectors are supported!");
6061     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6062   }
6063 
6064   if (!VT.isFixedLengthVector())
6065     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6066 
6067   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6068 
6069   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6070 
6071   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6072 }
6073 
6074 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6075                                               SelectionDAG &DAG) const {
6076   SDLoc DL(Op);
6077   MVT VT = Op.getSimpleValueType();
6078 
6079   SDValue Src = Op.getOperand(0);
6080   // NOTE: Mask is dropped.
6081   SDValue VL = Op.getOperand(2);
6082 
6083   MVT ContainerVT = VT;
6084   if (VT.isFixedLengthVector()) {
6085     ContainerVT = getContainerForFixedLengthVector(VT);
6086     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6087     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6088   }
6089 
6090   MVT XLenVT = Subtarget.getXLenVT();
6091   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6092   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6093                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6094 
6095   SDValue SplatValue = DAG.getConstant(
6096       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
6097   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6098                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6099 
6100   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6101                                Splat, ZeroSplat, VL);
6102   if (!VT.isFixedLengthVector())
6103     return Result;
6104   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6105 }
6106 
6107 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6108                                                 SelectionDAG &DAG) const {
6109   SDLoc DL(Op);
6110   MVT VT = Op.getSimpleValueType();
6111 
6112   SDValue Op1 = Op.getOperand(0);
6113   SDValue Op2 = Op.getOperand(1);
6114   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6115   // NOTE: Mask is dropped.
6116   SDValue VL = Op.getOperand(4);
6117 
6118   MVT ContainerVT = VT;
6119   if (VT.isFixedLengthVector()) {
6120     ContainerVT = getContainerForFixedLengthVector(VT);
6121     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6122     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6123   }
6124 
6125   SDValue Result;
6126   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
6127 
6128   switch (Condition) {
6129   default:
6130     break;
6131   // X != Y  --> (X^Y)
6132   case ISD::SETNE:
6133     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6134     break;
6135   // X == Y  --> ~(X^Y)
6136   case ISD::SETEQ: {
6137     SDValue Temp =
6138         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6139     Result =
6140         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6141     break;
6142   }
6143   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6144   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6145   case ISD::SETGT:
6146   case ISD::SETULT: {
6147     SDValue Temp =
6148         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6149     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6150     break;
6151   }
6152   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6153   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6154   case ISD::SETLT:
6155   case ISD::SETUGT: {
6156     SDValue Temp =
6157         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6158     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6159     break;
6160   }
6161   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6162   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6163   case ISD::SETGE:
6164   case ISD::SETULE: {
6165     SDValue Temp =
6166         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6167     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6168     break;
6169   }
6170   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6171   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6172   case ISD::SETLE:
6173   case ISD::SETUGE: {
6174     SDValue Temp =
6175         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6176     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6177     break;
6178   }
6179   }
6180 
6181   if (!VT.isFixedLengthVector())
6182     return Result;
6183   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6184 }
6185 
6186 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6187 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6188                                                 unsigned RISCVISDOpc) const {
6189   SDLoc DL(Op);
6190 
6191   SDValue Src = Op.getOperand(0);
6192   SDValue Mask = Op.getOperand(1);
6193   SDValue VL = Op.getOperand(2);
6194 
6195   MVT DstVT = Op.getSimpleValueType();
6196   MVT SrcVT = Src.getSimpleValueType();
6197   if (DstVT.isFixedLengthVector()) {
6198     DstVT = getContainerForFixedLengthVector(DstVT);
6199     SrcVT = getContainerForFixedLengthVector(SrcVT);
6200     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6201     MVT MaskVT = getMaskTypeFor(DstVT);
6202     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6203   }
6204 
6205   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6206                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6207                                 ? RISCVISD::VSEXT_VL
6208                                 : RISCVISD::VZEXT_VL;
6209 
6210   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6211   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6212 
6213   SDValue Result;
6214   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6215     if (SrcVT.isInteger()) {
6216       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6217 
6218       // Do we need to do any pre-widening before converting?
6219       if (SrcEltSize == 1) {
6220         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6221         MVT XLenVT = Subtarget.getXLenVT();
6222         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6223         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6224                                         DAG.getUNDEF(IntVT), Zero, VL);
6225         SDValue One = DAG.getConstant(
6226             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6227         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6228                                        DAG.getUNDEF(IntVT), One, VL);
6229         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6230                           ZeroSplat, VL);
6231       } else if (DstEltSize > (2 * SrcEltSize)) {
6232         // Widen before converting.
6233         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6234                                      DstVT.getVectorElementCount());
6235         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6236       }
6237 
6238       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6239     } else {
6240       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6241              "Wrong input/output vector types");
6242 
6243       // Convert f16 to f32 then convert f32 to i64.
6244       if (DstEltSize > (2 * SrcEltSize)) {
6245         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6246         MVT InterimFVT =
6247             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6248         Src =
6249             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6250       }
6251 
6252       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6253     }
6254   } else { // Narrowing + Conversion
6255     if (SrcVT.isInteger()) {
6256       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6257       // First do a narrowing convert to an FP type half the size, then round
6258       // the FP type to a small FP type if needed.
6259 
6260       MVT InterimFVT = DstVT;
6261       if (SrcEltSize > (2 * DstEltSize)) {
6262         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6263         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6264         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6265       }
6266 
6267       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6268 
6269       if (InterimFVT != DstVT) {
6270         Src = Result;
6271         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6272       }
6273     } else {
6274       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6275              "Wrong input/output vector types");
6276       // First do a narrowing conversion to an integer half the size, then
6277       // truncate if needed.
6278 
6279       if (DstEltSize == 1) {
6280         // First convert to the same size integer, then convert to mask using
6281         // setcc.
6282         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6283         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6284                                           DstVT.getVectorElementCount());
6285         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6286 
6287         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6288         // otherwise the conversion was undefined.
6289         MVT XLenVT = Subtarget.getXLenVT();
6290         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
6291         SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
6292                                 DAG.getUNDEF(InterimIVT), SplatZero);
6293         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6294                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6295       } else {
6296         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6297                                           DstVT.getVectorElementCount());
6298 
6299         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6300 
6301         while (InterimIVT != DstVT) {
6302           SrcEltSize /= 2;
6303           Src = Result;
6304           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6305                                         DstVT.getVectorElementCount());
6306           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6307                                Src, Mask, VL);
6308         }
6309       }
6310     }
6311   }
6312 
6313   MVT VT = Op.getSimpleValueType();
6314   if (!VT.isFixedLengthVector())
6315     return Result;
6316   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6317 }
6318 
6319 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6320                                             unsigned MaskOpc,
6321                                             unsigned VecOpc) const {
6322   MVT VT = Op.getSimpleValueType();
6323   if (VT.getVectorElementType() != MVT::i1)
6324     return lowerVPOp(Op, DAG, VecOpc);
6325 
6326   // It is safe to drop mask parameter as masked-off elements are undef.
6327   SDValue Op1 = Op->getOperand(0);
6328   SDValue Op2 = Op->getOperand(1);
6329   SDValue VL = Op->getOperand(3);
6330 
6331   MVT ContainerVT = VT;
6332   const bool IsFixed = VT.isFixedLengthVector();
6333   if (IsFixed) {
6334     ContainerVT = getContainerForFixedLengthVector(VT);
6335     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6336     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6337   }
6338 
6339   SDLoc DL(Op);
6340   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6341   if (!IsFixed)
6342     return Val;
6343   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6344 }
6345 
6346 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6347 // matched to a RVV indexed load. The RVV indexed load instructions only
6348 // support the "unsigned unscaled" addressing mode; indices are implicitly
6349 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6350 // signed or scaled indexing is extended to the XLEN value type and scaled
6351 // accordingly.
6352 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6353                                                SelectionDAG &DAG) const {
6354   SDLoc DL(Op);
6355   MVT VT = Op.getSimpleValueType();
6356 
6357   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6358   EVT MemVT = MemSD->getMemoryVT();
6359   MachineMemOperand *MMO = MemSD->getMemOperand();
6360   SDValue Chain = MemSD->getChain();
6361   SDValue BasePtr = MemSD->getBasePtr();
6362 
6363   ISD::LoadExtType LoadExtType;
6364   SDValue Index, Mask, PassThru, VL;
6365 
6366   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6367     Index = VPGN->getIndex();
6368     Mask = VPGN->getMask();
6369     PassThru = DAG.getUNDEF(VT);
6370     VL = VPGN->getVectorLength();
6371     // VP doesn't support extending loads.
6372     LoadExtType = ISD::NON_EXTLOAD;
6373   } else {
6374     // Else it must be a MGATHER.
6375     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6376     Index = MGN->getIndex();
6377     Mask = MGN->getMask();
6378     PassThru = MGN->getPassThru();
6379     LoadExtType = MGN->getExtensionType();
6380   }
6381 
6382   MVT IndexVT = Index.getSimpleValueType();
6383   MVT XLenVT = Subtarget.getXLenVT();
6384 
6385   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6386          "Unexpected VTs!");
6387   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6388   // Targets have to explicitly opt-in for extending vector loads.
6389   assert(LoadExtType == ISD::NON_EXTLOAD &&
6390          "Unexpected extending MGATHER/VP_GATHER");
6391   (void)LoadExtType;
6392 
6393   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6394   // the selection of the masked intrinsics doesn't do this for us.
6395   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6396 
6397   MVT ContainerVT = VT;
6398   if (VT.isFixedLengthVector()) {
6399     // We need to use the larger of the result and index type to determine the
6400     // scalable type to use so we don't increase LMUL for any operand/result.
6401     if (VT.bitsGE(IndexVT)) {
6402       ContainerVT = getContainerForFixedLengthVector(VT);
6403       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6404                                  ContainerVT.getVectorElementCount());
6405     } else {
6406       IndexVT = getContainerForFixedLengthVector(IndexVT);
6407       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6408                                      IndexVT.getVectorElementCount());
6409     }
6410 
6411     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6412 
6413     if (!IsUnmasked) {
6414       MVT MaskVT = getMaskTypeFor(ContainerVT);
6415       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6416       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6417     }
6418   }
6419 
6420   if (!VL)
6421     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6422 
6423   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6424     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6425     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6426                                    VL);
6427     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6428                         TrueMask, VL);
6429   }
6430 
6431   unsigned IntID =
6432       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6433   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6434   if (IsUnmasked)
6435     Ops.push_back(DAG.getUNDEF(ContainerVT));
6436   else
6437     Ops.push_back(PassThru);
6438   Ops.push_back(BasePtr);
6439   Ops.push_back(Index);
6440   if (!IsUnmasked)
6441     Ops.push_back(Mask);
6442   Ops.push_back(VL);
6443   if (!IsUnmasked)
6444     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6445 
6446   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6447   SDValue Result =
6448       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6449   Chain = Result.getValue(1);
6450 
6451   if (VT.isFixedLengthVector())
6452     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6453 
6454   return DAG.getMergeValues({Result, Chain}, DL);
6455 }
6456 
6457 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6458 // matched to a RVV indexed store. The RVV indexed store instructions only
6459 // support the "unsigned unscaled" addressing mode; indices are implicitly
6460 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6461 // signed or scaled indexing is extended to the XLEN value type and scaled
6462 // accordingly.
6463 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6464                                                 SelectionDAG &DAG) const {
6465   SDLoc DL(Op);
6466   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6467   EVT MemVT = MemSD->getMemoryVT();
6468   MachineMemOperand *MMO = MemSD->getMemOperand();
6469   SDValue Chain = MemSD->getChain();
6470   SDValue BasePtr = MemSD->getBasePtr();
6471 
6472   bool IsTruncatingStore = false;
6473   SDValue Index, Mask, Val, VL;
6474 
6475   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6476     Index = VPSN->getIndex();
6477     Mask = VPSN->getMask();
6478     Val = VPSN->getValue();
6479     VL = VPSN->getVectorLength();
6480     // VP doesn't support truncating stores.
6481     IsTruncatingStore = false;
6482   } else {
6483     // Else it must be a MSCATTER.
6484     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6485     Index = MSN->getIndex();
6486     Mask = MSN->getMask();
6487     Val = MSN->getValue();
6488     IsTruncatingStore = MSN->isTruncatingStore();
6489   }
6490 
6491   MVT VT = Val.getSimpleValueType();
6492   MVT IndexVT = Index.getSimpleValueType();
6493   MVT XLenVT = Subtarget.getXLenVT();
6494 
6495   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6496          "Unexpected VTs!");
6497   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6498   // Targets have to explicitly opt-in for extending vector loads and
6499   // truncating vector stores.
6500   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6501   (void)IsTruncatingStore;
6502 
6503   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6504   // the selection of the masked intrinsics doesn't do this for us.
6505   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6506 
6507   MVT ContainerVT = VT;
6508   if (VT.isFixedLengthVector()) {
6509     // We need to use the larger of the value and index type to determine the
6510     // scalable type to use so we don't increase LMUL for any operand/result.
6511     if (VT.bitsGE(IndexVT)) {
6512       ContainerVT = getContainerForFixedLengthVector(VT);
6513       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6514                                  ContainerVT.getVectorElementCount());
6515     } else {
6516       IndexVT = getContainerForFixedLengthVector(IndexVT);
6517       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6518                                      IndexVT.getVectorElementCount());
6519     }
6520 
6521     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6522     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6523 
6524     if (!IsUnmasked) {
6525       MVT MaskVT = getMaskTypeFor(ContainerVT);
6526       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6527     }
6528   }
6529 
6530   if (!VL)
6531     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6532 
6533   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6534     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6535     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6536                                    VL);
6537     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6538                         TrueMask, VL);
6539   }
6540 
6541   unsigned IntID =
6542       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6543   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6544   Ops.push_back(Val);
6545   Ops.push_back(BasePtr);
6546   Ops.push_back(Index);
6547   if (!IsUnmasked)
6548     Ops.push_back(Mask);
6549   Ops.push_back(VL);
6550 
6551   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6552                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6553 }
6554 
6555 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6556                                                SelectionDAG &DAG) const {
6557   const MVT XLenVT = Subtarget.getXLenVT();
6558   SDLoc DL(Op);
6559   SDValue Chain = Op->getOperand(0);
6560   SDValue SysRegNo = DAG.getTargetConstant(
6561       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6562   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6563   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6564 
6565   // Encoding used for rounding mode in RISCV differs from that used in
6566   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6567   // table, which consists of a sequence of 4-bit fields, each representing
6568   // corresponding FLT_ROUNDS mode.
6569   static const int Table =
6570       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6571       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6572       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6573       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6574       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6575 
6576   SDValue Shift =
6577       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6578   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6579                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6580   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6581                                DAG.getConstant(7, DL, XLenVT));
6582 
6583   return DAG.getMergeValues({Masked, Chain}, DL);
6584 }
6585 
6586 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6587                                                SelectionDAG &DAG) const {
6588   const MVT XLenVT = Subtarget.getXLenVT();
6589   SDLoc DL(Op);
6590   SDValue Chain = Op->getOperand(0);
6591   SDValue RMValue = Op->getOperand(1);
6592   SDValue SysRegNo = DAG.getTargetConstant(
6593       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6594 
6595   // Encoding used for rounding mode in RISCV differs from that used in
6596   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6597   // a table, which consists of a sequence of 4-bit fields, each representing
6598   // corresponding RISCV mode.
6599   static const unsigned Table =
6600       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6601       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6602       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6603       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6604       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6605 
6606   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6607                               DAG.getConstant(2, DL, XLenVT));
6608   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6609                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6610   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6611                         DAG.getConstant(0x7, DL, XLenVT));
6612   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6613                      RMValue);
6614 }
6615 
6616 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6617   switch (IntNo) {
6618   default:
6619     llvm_unreachable("Unexpected Intrinsic");
6620   case Intrinsic::riscv_bcompress:
6621     return RISCVISD::BCOMPRESSW;
6622   case Intrinsic::riscv_bdecompress:
6623     return RISCVISD::BDECOMPRESSW;
6624   case Intrinsic::riscv_bfp:
6625     return RISCVISD::BFPW;
6626   case Intrinsic::riscv_fsl:
6627     return RISCVISD::FSLW;
6628   case Intrinsic::riscv_fsr:
6629     return RISCVISD::FSRW;
6630   }
6631 }
6632 
6633 // Converts the given intrinsic to a i64 operation with any extension.
6634 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6635                                          unsigned IntNo) {
6636   SDLoc DL(N);
6637   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6638   // Deal with the Instruction Operands
6639   SmallVector<SDValue, 3> NewOps;
6640   for (SDValue Op : drop_begin(N->ops()))
6641     // Promote the operand to i64 type
6642     NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
6643   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
6644   // ReplaceNodeResults requires we maintain the same type for the return value.
6645   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6646 }
6647 
6648 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6649 // form of the given Opcode.
6650 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6651   switch (Opcode) {
6652   default:
6653     llvm_unreachable("Unexpected opcode");
6654   case ISD::SHL:
6655     return RISCVISD::SLLW;
6656   case ISD::SRA:
6657     return RISCVISD::SRAW;
6658   case ISD::SRL:
6659     return RISCVISD::SRLW;
6660   case ISD::SDIV:
6661     return RISCVISD::DIVW;
6662   case ISD::UDIV:
6663     return RISCVISD::DIVUW;
6664   case ISD::UREM:
6665     return RISCVISD::REMUW;
6666   case ISD::ROTL:
6667     return RISCVISD::ROLW;
6668   case ISD::ROTR:
6669     return RISCVISD::RORW;
6670   }
6671 }
6672 
6673 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6674 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6675 // otherwise be promoted to i64, making it difficult to select the
6676 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6677 // type i8/i16/i32 is lost.
6678 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6679                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6680   SDLoc DL(N);
6681   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6682   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6683   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6684   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6685   // ReplaceNodeResults requires we maintain the same type for the return value.
6686   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6687 }
6688 
6689 // Converts the given 32-bit operation to a i64 operation with signed extension
6690 // semantic to reduce the signed extension instructions.
6691 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6692   SDLoc DL(N);
6693   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6694   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6695   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6696   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6697                                DAG.getValueType(MVT::i32));
6698   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6699 }
6700 
6701 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6702                                              SmallVectorImpl<SDValue> &Results,
6703                                              SelectionDAG &DAG) const {
6704   SDLoc DL(N);
6705   switch (N->getOpcode()) {
6706   default:
6707     llvm_unreachable("Don't know how to custom type legalize this operation!");
6708   case ISD::STRICT_FP_TO_SINT:
6709   case ISD::STRICT_FP_TO_UINT:
6710   case ISD::FP_TO_SINT:
6711   case ISD::FP_TO_UINT: {
6712     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6713            "Unexpected custom legalisation");
6714     bool IsStrict = N->isStrictFPOpcode();
6715     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6716                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6717     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6718     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6719         TargetLowering::TypeSoftenFloat) {
6720       if (!isTypeLegal(Op0.getValueType()))
6721         return;
6722       if (IsStrict) {
6723         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6724                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6725         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6726         SDValue Res = DAG.getNode(
6727             Opc, DL, VTs, N->getOperand(0), Op0,
6728             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6729         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6730         Results.push_back(Res.getValue(1));
6731         return;
6732       }
6733       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6734       SDValue Res =
6735           DAG.getNode(Opc, DL, MVT::i64, Op0,
6736                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6737       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6738       return;
6739     }
6740     // If the FP type needs to be softened, emit a library call using the 'si'
6741     // version. If we left it to default legalization we'd end up with 'di'. If
6742     // the FP type doesn't need to be softened just let generic type
6743     // legalization promote the result type.
6744     RTLIB::Libcall LC;
6745     if (IsSigned)
6746       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6747     else
6748       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6749     MakeLibCallOptions CallOptions;
6750     EVT OpVT = Op0.getValueType();
6751     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6752     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6753     SDValue Result;
6754     std::tie(Result, Chain) =
6755         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6756     Results.push_back(Result);
6757     if (IsStrict)
6758       Results.push_back(Chain);
6759     break;
6760   }
6761   case ISD::READCYCLECOUNTER: {
6762     assert(!Subtarget.is64Bit() &&
6763            "READCYCLECOUNTER only has custom type legalization on riscv32");
6764 
6765     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6766     SDValue RCW =
6767         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6768 
6769     Results.push_back(
6770         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6771     Results.push_back(RCW.getValue(2));
6772     break;
6773   }
6774   case ISD::MUL: {
6775     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6776     unsigned XLen = Subtarget.getXLen();
6777     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6778     if (Size > XLen) {
6779       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6780       SDValue LHS = N->getOperand(0);
6781       SDValue RHS = N->getOperand(1);
6782       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6783 
6784       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6785       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6786       // We need exactly one side to be unsigned.
6787       if (LHSIsU == RHSIsU)
6788         return;
6789 
6790       auto MakeMULPair = [&](SDValue S, SDValue U) {
6791         MVT XLenVT = Subtarget.getXLenVT();
6792         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6793         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6794         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6795         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6796         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6797       };
6798 
6799       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6800       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6801 
6802       // The other operand should be signed, but still prefer MULH when
6803       // possible.
6804       if (RHSIsU && LHSIsS && !RHSIsS)
6805         Results.push_back(MakeMULPair(LHS, RHS));
6806       else if (LHSIsU && RHSIsS && !LHSIsS)
6807         Results.push_back(MakeMULPair(RHS, LHS));
6808 
6809       return;
6810     }
6811     LLVM_FALLTHROUGH;
6812   }
6813   case ISD::ADD:
6814   case ISD::SUB:
6815     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6816            "Unexpected custom legalisation");
6817     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6818     break;
6819   case ISD::SHL:
6820   case ISD::SRA:
6821   case ISD::SRL:
6822     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6823            "Unexpected custom legalisation");
6824     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6825       // If we can use a BSET instruction, allow default promotion to apply.
6826       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
6827           isOneConstant(N->getOperand(0)))
6828         break;
6829       Results.push_back(customLegalizeToWOp(N, DAG));
6830       break;
6831     }
6832 
6833     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6834     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6835     // shift amount.
6836     if (N->getOpcode() == ISD::SHL) {
6837       SDLoc DL(N);
6838       SDValue NewOp0 =
6839           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6840       SDValue NewOp1 =
6841           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6842       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6843       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6844                                    DAG.getValueType(MVT::i32));
6845       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6846     }
6847 
6848     break;
6849   case ISD::ROTL:
6850   case ISD::ROTR:
6851     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6852            "Unexpected custom legalisation");
6853     Results.push_back(customLegalizeToWOp(N, DAG));
6854     break;
6855   case ISD::CTTZ:
6856   case ISD::CTTZ_ZERO_UNDEF:
6857   case ISD::CTLZ:
6858   case ISD::CTLZ_ZERO_UNDEF: {
6859     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6860            "Unexpected custom legalisation");
6861 
6862     SDValue NewOp0 =
6863         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6864     bool IsCTZ =
6865         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6866     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6867     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6868     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6869     return;
6870   }
6871   case ISD::SDIV:
6872   case ISD::UDIV:
6873   case ISD::UREM: {
6874     MVT VT = N->getSimpleValueType(0);
6875     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6876            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6877            "Unexpected custom legalisation");
6878     // Don't promote division/remainder by constant since we should expand those
6879     // to multiply by magic constant.
6880     // FIXME: What if the expansion is disabled for minsize.
6881     if (N->getOperand(1).getOpcode() == ISD::Constant)
6882       return;
6883 
6884     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6885     // the upper 32 bits. For other types we need to sign or zero extend
6886     // based on the opcode.
6887     unsigned ExtOpc = ISD::ANY_EXTEND;
6888     if (VT != MVT::i32)
6889       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6890                                            : ISD::ZERO_EXTEND;
6891 
6892     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6893     break;
6894   }
6895   case ISD::UADDO:
6896   case ISD::USUBO: {
6897     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6898            "Unexpected custom legalisation");
6899     bool IsAdd = N->getOpcode() == ISD::UADDO;
6900     // Create an ADDW or SUBW.
6901     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6902     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6903     SDValue Res =
6904         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6905     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6906                       DAG.getValueType(MVT::i32));
6907 
6908     SDValue Overflow;
6909     if (IsAdd && isOneConstant(RHS)) {
6910       // Special case uaddo X, 1 overflowed if the addition result is 0.
6911       // The general case (X + C) < C is not necessarily beneficial. Although we
6912       // reduce the live range of X, we may introduce the materialization of
6913       // constant C, especially when the setcc result is used by branch. We have
6914       // no compare with constant and branch instructions.
6915       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6916                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6917     } else {
6918       // Sign extend the LHS and perform an unsigned compare with the ADDW
6919       // result. Since the inputs are sign extended from i32, this is equivalent
6920       // to comparing the lower 32 bits.
6921       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6922       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6923                               IsAdd ? ISD::SETULT : ISD::SETUGT);
6924     }
6925 
6926     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6927     Results.push_back(Overflow);
6928     return;
6929   }
6930   case ISD::UADDSAT:
6931   case ISD::USUBSAT: {
6932     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6933            "Unexpected custom legalisation");
6934     if (Subtarget.hasStdExtZbb()) {
6935       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6936       // sign extend allows overflow of the lower 32 bits to be detected on
6937       // the promoted size.
6938       SDValue LHS =
6939           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6940       SDValue RHS =
6941           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6942       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6943       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6944       return;
6945     }
6946 
6947     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6948     // promotion for UADDO/USUBO.
6949     Results.push_back(expandAddSubSat(N, DAG));
6950     return;
6951   }
6952   case ISD::ABS: {
6953     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6954            "Unexpected custom legalisation");
6955           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6956 
6957     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6958 
6959     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6960 
6961     // Freeze the source so we can increase it's use count.
6962     Src = DAG.getFreeze(Src);
6963 
6964     // Copy sign bit to all bits using the sraiw pattern.
6965     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6966                                    DAG.getValueType(MVT::i32));
6967     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6968                            DAG.getConstant(31, DL, MVT::i64));
6969 
6970     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6971     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6972 
6973     // NOTE: The result is only required to be anyextended, but sext is
6974     // consistent with type legalization of sub.
6975     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6976                          DAG.getValueType(MVT::i32));
6977     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6978     return;
6979   }
6980   case ISD::BITCAST: {
6981     EVT VT = N->getValueType(0);
6982     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6983     SDValue Op0 = N->getOperand(0);
6984     EVT Op0VT = Op0.getValueType();
6985     MVT XLenVT = Subtarget.getXLenVT();
6986     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6987       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6988       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6989     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6990                Subtarget.hasStdExtF()) {
6991       SDValue FPConv =
6992           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6993       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6994     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6995                isTypeLegal(Op0VT)) {
6996       // Custom-legalize bitcasts from fixed-length vector types to illegal
6997       // scalar types in order to improve codegen. Bitcast the vector to a
6998       // one-element vector type whose element type is the same as the result
6999       // type, and extract the first element.
7000       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
7001       if (isTypeLegal(BVT)) {
7002         SDValue BVec = DAG.getBitcast(BVT, Op0);
7003         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
7004                                       DAG.getConstant(0, DL, XLenVT)));
7005       }
7006     }
7007     break;
7008   }
7009   case RISCVISD::GREV:
7010   case RISCVISD::GORC:
7011   case RISCVISD::SHFL: {
7012     MVT VT = N->getSimpleValueType(0);
7013     MVT XLenVT = Subtarget.getXLenVT();
7014     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
7015            "Unexpected custom legalisation");
7016     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
7017     assert((Subtarget.hasStdExtZbp() ||
7018             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
7019              N->getConstantOperandVal(1) == 7)) &&
7020            "Unexpected extension");
7021     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7022     SDValue NewOp1 =
7023         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7024     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7025     // ReplaceNodeResults requires we maintain the same type for the return
7026     // value.
7027     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7028     break;
7029   }
7030   case ISD::BSWAP:
7031   case ISD::BITREVERSE: {
7032     MVT VT = N->getSimpleValueType(0);
7033     MVT XLenVT = Subtarget.getXLenVT();
7034     assert((VT == MVT::i8 || VT == MVT::i16 ||
7035             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7036            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7037     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7038     unsigned Imm = VT.getSizeInBits() - 1;
7039     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7040     if (N->getOpcode() == ISD::BSWAP)
7041       Imm &= ~0x7U;
7042     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7043                                 DAG.getConstant(Imm, DL, XLenVT));
7044     // ReplaceNodeResults requires we maintain the same type for the return
7045     // value.
7046     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7047     break;
7048   }
7049   case ISD::FSHL:
7050   case ISD::FSHR: {
7051     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7052            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7053     SDValue NewOp0 =
7054         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7055     SDValue NewOp1 =
7056         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7057     SDValue NewShAmt =
7058         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7059     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7060     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7061     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7062                            DAG.getConstant(0x1f, DL, MVT::i64));
7063     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
7064     // instruction use different orders. fshl will return its first operand for
7065     // shift of zero, fshr will return its second operand. fsl and fsr both
7066     // return rs1 so the ISD nodes need to have different operand orders.
7067     // Shift amount is in rs2.
7068     unsigned Opc = RISCVISD::FSLW;
7069     if (N->getOpcode() == ISD::FSHR) {
7070       std::swap(NewOp0, NewOp1);
7071       Opc = RISCVISD::FSRW;
7072     }
7073     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7074     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7075     break;
7076   }
7077   case ISD::EXTRACT_VECTOR_ELT: {
7078     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
7079     // type is illegal (currently only vXi64 RV32).
7080     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
7081     // transferred to the destination register. We issue two of these from the
7082     // upper- and lower- halves of the SEW-bit vector element, slid down to the
7083     // first element.
7084     SDValue Vec = N->getOperand(0);
7085     SDValue Idx = N->getOperand(1);
7086 
7087     // The vector type hasn't been legalized yet so we can't issue target
7088     // specific nodes if it needs legalization.
7089     // FIXME: We would manually legalize if it's important.
7090     if (!isTypeLegal(Vec.getValueType()))
7091       return;
7092 
7093     MVT VecVT = Vec.getSimpleValueType();
7094 
7095     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7096            VecVT.getVectorElementType() == MVT::i64 &&
7097            "Unexpected EXTRACT_VECTOR_ELT legalization");
7098 
7099     // If this is a fixed vector, we need to convert it to a scalable vector.
7100     MVT ContainerVT = VecVT;
7101     if (VecVT.isFixedLengthVector()) {
7102       ContainerVT = getContainerForFixedLengthVector(VecVT);
7103       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7104     }
7105 
7106     MVT XLenVT = Subtarget.getXLenVT();
7107 
7108     // Use a VL of 1 to avoid processing more elements than we need.
7109     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7110     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
7111 
7112     // Unless the index is known to be 0, we must slide the vector down to get
7113     // the desired element into index 0.
7114     if (!isNullConstant(Idx)) {
7115       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7116                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7117     }
7118 
7119     // Extract the lower XLEN bits of the correct vector element.
7120     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7121 
7122     // To extract the upper XLEN bits of the vector element, shift the first
7123     // element right by 32 bits and re-extract the lower XLEN bits.
7124     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7125                                      DAG.getUNDEF(ContainerVT),
7126                                      DAG.getConstant(32, DL, XLenVT), VL);
7127     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7128                                  ThirtyTwoV, Mask, VL);
7129 
7130     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7131 
7132     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7133     break;
7134   }
7135   case ISD::INTRINSIC_WO_CHAIN: {
7136     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7137     switch (IntNo) {
7138     default:
7139       llvm_unreachable(
7140           "Don't know how to custom type legalize this intrinsic!");
7141     case Intrinsic::riscv_grev:
7142     case Intrinsic::riscv_gorc: {
7143       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7144              "Unexpected custom legalisation");
7145       SDValue NewOp1 =
7146           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7147       SDValue NewOp2 =
7148           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7149       unsigned Opc =
7150           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
7151       // If the control is a constant, promote the node by clearing any extra
7152       // bits bits in the control. isel will form greviw/gorciw if the result is
7153       // sign extended.
7154       if (isa<ConstantSDNode>(NewOp2)) {
7155         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7156                              DAG.getConstant(0x1f, DL, MVT::i64));
7157         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7158       }
7159       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7160       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7161       break;
7162     }
7163     case Intrinsic::riscv_bcompress:
7164     case Intrinsic::riscv_bdecompress:
7165     case Intrinsic::riscv_bfp:
7166     case Intrinsic::riscv_fsl:
7167     case Intrinsic::riscv_fsr: {
7168       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7169              "Unexpected custom legalisation");
7170       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7171       break;
7172     }
7173     case Intrinsic::riscv_orc_b: {
7174       // Lower to the GORCI encoding for orc.b with the operand extended.
7175       SDValue NewOp =
7176           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7177       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7178                                 DAG.getConstant(7, DL, MVT::i64));
7179       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7180       return;
7181     }
7182     case Intrinsic::riscv_shfl:
7183     case Intrinsic::riscv_unshfl: {
7184       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7185              "Unexpected custom legalisation");
7186       SDValue NewOp1 =
7187           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7188       SDValue NewOp2 =
7189           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7190       unsigned Opc =
7191           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7192       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7193       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7194       // will be shuffled the same way as the lower 32 bit half, but the two
7195       // halves won't cross.
7196       if (isa<ConstantSDNode>(NewOp2)) {
7197         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7198                              DAG.getConstant(0xf, DL, MVT::i64));
7199         Opc =
7200             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7201       }
7202       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7203       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7204       break;
7205     }
7206     case Intrinsic::riscv_vmv_x_s: {
7207       EVT VT = N->getValueType(0);
7208       MVT XLenVT = Subtarget.getXLenVT();
7209       if (VT.bitsLT(XLenVT)) {
7210         // Simple case just extract using vmv.x.s and truncate.
7211         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7212                                       Subtarget.getXLenVT(), N->getOperand(1));
7213         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7214         return;
7215       }
7216 
7217       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7218              "Unexpected custom legalization");
7219 
7220       // We need to do the move in two steps.
7221       SDValue Vec = N->getOperand(1);
7222       MVT VecVT = Vec.getSimpleValueType();
7223 
7224       // First extract the lower XLEN bits of the element.
7225       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7226 
7227       // To extract the upper XLEN bits of the vector element, shift the first
7228       // element right by 32 bits and re-extract the lower XLEN bits.
7229       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7230       SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
7231 
7232       SDValue ThirtyTwoV =
7233           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7234                       DAG.getConstant(32, DL, XLenVT), VL);
7235       SDValue LShr32 =
7236           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7237       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7238 
7239       Results.push_back(
7240           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7241       break;
7242     }
7243     }
7244     break;
7245   }
7246   case ISD::VECREDUCE_ADD:
7247   case ISD::VECREDUCE_AND:
7248   case ISD::VECREDUCE_OR:
7249   case ISD::VECREDUCE_XOR:
7250   case ISD::VECREDUCE_SMAX:
7251   case ISD::VECREDUCE_UMAX:
7252   case ISD::VECREDUCE_SMIN:
7253   case ISD::VECREDUCE_UMIN:
7254     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7255       Results.push_back(V);
7256     break;
7257   case ISD::VP_REDUCE_ADD:
7258   case ISD::VP_REDUCE_AND:
7259   case ISD::VP_REDUCE_OR:
7260   case ISD::VP_REDUCE_XOR:
7261   case ISD::VP_REDUCE_SMAX:
7262   case ISD::VP_REDUCE_UMAX:
7263   case ISD::VP_REDUCE_SMIN:
7264   case ISD::VP_REDUCE_UMIN:
7265     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7266       Results.push_back(V);
7267     break;
7268   case ISD::FLT_ROUNDS_: {
7269     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7270     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7271     Results.push_back(Res.getValue(0));
7272     Results.push_back(Res.getValue(1));
7273     break;
7274   }
7275   }
7276 }
7277 
7278 // A structure to hold one of the bit-manipulation patterns below. Together, a
7279 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7280 //   (or (and (shl x, 1), 0xAAAAAAAA),
7281 //       (and (srl x, 1), 0x55555555))
7282 struct RISCVBitmanipPat {
7283   SDValue Op;
7284   unsigned ShAmt;
7285   bool IsSHL;
7286 
7287   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7288     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7289   }
7290 };
7291 
7292 // Matches patterns of the form
7293 //   (and (shl x, C2), (C1 << C2))
7294 //   (and (srl x, C2), C1)
7295 //   (shl (and x, C1), C2)
7296 //   (srl (and x, (C1 << C2)), C2)
7297 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7298 // The expected masks for each shift amount are specified in BitmanipMasks where
7299 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7300 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7301 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7302 // XLen is 64.
7303 static Optional<RISCVBitmanipPat>
7304 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7305   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7306          "Unexpected number of masks");
7307   Optional<uint64_t> Mask;
7308   // Optionally consume a mask around the shift operation.
7309   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7310     Mask = Op.getConstantOperandVal(1);
7311     Op = Op.getOperand(0);
7312   }
7313   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7314     return None;
7315   bool IsSHL = Op.getOpcode() == ISD::SHL;
7316 
7317   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7318     return None;
7319   uint64_t ShAmt = Op.getConstantOperandVal(1);
7320 
7321   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7322   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7323     return None;
7324   // If we don't have enough masks for 64 bit, then we must be trying to
7325   // match SHFL so we're only allowed to shift 1/4 of the width.
7326   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7327     return None;
7328 
7329   SDValue Src = Op.getOperand(0);
7330 
7331   // The expected mask is shifted left when the AND is found around SHL
7332   // patterns.
7333   //   ((x >> 1) & 0x55555555)
7334   //   ((x << 1) & 0xAAAAAAAA)
7335   bool SHLExpMask = IsSHL;
7336 
7337   if (!Mask) {
7338     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7339     // the mask is all ones: consume that now.
7340     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7341       Mask = Src.getConstantOperandVal(1);
7342       Src = Src.getOperand(0);
7343       // The expected mask is now in fact shifted left for SRL, so reverse the
7344       // decision.
7345       //   ((x & 0xAAAAAAAA) >> 1)
7346       //   ((x & 0x55555555) << 1)
7347       SHLExpMask = !SHLExpMask;
7348     } else {
7349       // Use a default shifted mask of all-ones if there's no AND, truncated
7350       // down to the expected width. This simplifies the logic later on.
7351       Mask = maskTrailingOnes<uint64_t>(Width);
7352       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7353     }
7354   }
7355 
7356   unsigned MaskIdx = Log2_32(ShAmt);
7357   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7358 
7359   if (SHLExpMask)
7360     ExpMask <<= ShAmt;
7361 
7362   if (Mask != ExpMask)
7363     return None;
7364 
7365   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7366 }
7367 
7368 // Matches any of the following bit-manipulation patterns:
7369 //   (and (shl x, 1), (0x55555555 << 1))
7370 //   (and (srl x, 1), 0x55555555)
7371 //   (shl (and x, 0x55555555), 1)
7372 //   (srl (and x, (0x55555555 << 1)), 1)
7373 // where the shift amount and mask may vary thus:
7374 //   [1]  = 0x55555555 / 0xAAAAAAAA
7375 //   [2]  = 0x33333333 / 0xCCCCCCCC
7376 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7377 //   [8]  = 0x00FF00FF / 0xFF00FF00
7378 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7379 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7380 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7381   // These are the unshifted masks which we use to match bit-manipulation
7382   // patterns. They may be shifted left in certain circumstances.
7383   static const uint64_t BitmanipMasks[] = {
7384       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7385       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7386 
7387   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7388 }
7389 
7390 // Try to fold (<bop> x, (reduction.<bop> vec, start))
7391 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
7392   auto BinOpToRVVReduce = [](unsigned Opc) {
7393     switch (Opc) {
7394     default:
7395       llvm_unreachable("Unhandled binary to transfrom reduction");
7396     case ISD::ADD:
7397       return RISCVISD::VECREDUCE_ADD_VL;
7398     case ISD::UMAX:
7399       return RISCVISD::VECREDUCE_UMAX_VL;
7400     case ISD::SMAX:
7401       return RISCVISD::VECREDUCE_SMAX_VL;
7402     case ISD::UMIN:
7403       return RISCVISD::VECREDUCE_UMIN_VL;
7404     case ISD::SMIN:
7405       return RISCVISD::VECREDUCE_SMIN_VL;
7406     case ISD::AND:
7407       return RISCVISD::VECREDUCE_AND_VL;
7408     case ISD::OR:
7409       return RISCVISD::VECREDUCE_OR_VL;
7410     case ISD::XOR:
7411       return RISCVISD::VECREDUCE_XOR_VL;
7412     case ISD::FADD:
7413       return RISCVISD::VECREDUCE_FADD_VL;
7414     case ISD::FMAXNUM:
7415       return RISCVISD::VECREDUCE_FMAX_VL;
7416     case ISD::FMINNUM:
7417       return RISCVISD::VECREDUCE_FMIN_VL;
7418     }
7419   };
7420 
7421   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
7422     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7423            isNullConstant(V.getOperand(1)) &&
7424            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
7425   };
7426 
7427   unsigned Opc = N->getOpcode();
7428   unsigned ReduceIdx;
7429   if (IsReduction(N->getOperand(0), Opc))
7430     ReduceIdx = 0;
7431   else if (IsReduction(N->getOperand(1), Opc))
7432     ReduceIdx = 1;
7433   else
7434     return SDValue();
7435 
7436   // Skip if FADD disallows reassociation but the combiner needs.
7437   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
7438     return SDValue();
7439 
7440   SDValue Extract = N->getOperand(ReduceIdx);
7441   SDValue Reduce = Extract.getOperand(0);
7442   if (!Reduce.hasOneUse())
7443     return SDValue();
7444 
7445   SDValue ScalarV = Reduce.getOperand(2);
7446 
7447   // Make sure that ScalarV is a splat with VL=1.
7448   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
7449       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
7450       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
7451     return SDValue();
7452 
7453   if (!isOneConstant(ScalarV.getOperand(2)))
7454     return SDValue();
7455 
7456   // TODO: Deal with value other than neutral element.
7457   auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
7458     if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
7459         isNullFPConstant(V))
7460       return true;
7461     return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
7462                                  N->getFlags()) == V;
7463   };
7464 
7465   // Check the scalar of ScalarV is neutral element
7466   if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
7467     return SDValue();
7468 
7469   if (!ScalarV.hasOneUse())
7470     return SDValue();
7471 
7472   EVT SplatVT = ScalarV.getValueType();
7473   SDValue NewStart = N->getOperand(1 - ReduceIdx);
7474   unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
7475   if (SplatVT.isInteger()) {
7476     auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
7477     if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
7478       SplatOpc = RISCVISD::VMV_S_X_VL;
7479     else
7480       SplatOpc = RISCVISD::VMV_V_X_VL;
7481   }
7482 
7483   SDValue NewScalarV =
7484       DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
7485                   ScalarV.getOperand(2));
7486   SDValue NewReduce =
7487       DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
7488                   Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
7489                   Reduce.getOperand(3), Reduce.getOperand(4));
7490   return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
7491                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
7492 }
7493 
7494 // Match the following pattern as a GREVI(W) operation
7495 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7496 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7497                                const RISCVSubtarget &Subtarget) {
7498   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7499   EVT VT = Op.getValueType();
7500 
7501   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7502     auto LHS = matchGREVIPat(Op.getOperand(0));
7503     auto RHS = matchGREVIPat(Op.getOperand(1));
7504     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7505       SDLoc DL(Op);
7506       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7507                          DAG.getConstant(LHS->ShAmt, DL, VT));
7508     }
7509   }
7510   return SDValue();
7511 }
7512 
7513 // Matches any the following pattern as a GORCI(W) operation
7514 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7515 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7516 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7517 // Note that with the variant of 3.,
7518 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7519 // the inner pattern will first be matched as GREVI and then the outer
7520 // pattern will be matched to GORC via the first rule above.
7521 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7522 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7523                                const RISCVSubtarget &Subtarget) {
7524   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7525   EVT VT = Op.getValueType();
7526 
7527   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7528     SDLoc DL(Op);
7529     SDValue Op0 = Op.getOperand(0);
7530     SDValue Op1 = Op.getOperand(1);
7531 
7532     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7533       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7534           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7535           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7536         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7537       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7538       if ((Reverse.getOpcode() == ISD::ROTL ||
7539            Reverse.getOpcode() == ISD::ROTR) &&
7540           Reverse.getOperand(0) == X &&
7541           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7542         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7543         if (RotAmt == (VT.getSizeInBits() / 2))
7544           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7545                              DAG.getConstant(RotAmt, DL, VT));
7546       }
7547       return SDValue();
7548     };
7549 
7550     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7551     if (SDValue V = MatchOROfReverse(Op0, Op1))
7552       return V;
7553     if (SDValue V = MatchOROfReverse(Op1, Op0))
7554       return V;
7555 
7556     // OR is commutable so canonicalize its OR operand to the left
7557     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7558       std::swap(Op0, Op1);
7559     if (Op0.getOpcode() != ISD::OR)
7560       return SDValue();
7561     SDValue OrOp0 = Op0.getOperand(0);
7562     SDValue OrOp1 = Op0.getOperand(1);
7563     auto LHS = matchGREVIPat(OrOp0);
7564     // OR is commutable so swap the operands and try again: x might have been
7565     // on the left
7566     if (!LHS) {
7567       std::swap(OrOp0, OrOp1);
7568       LHS = matchGREVIPat(OrOp0);
7569     }
7570     auto RHS = matchGREVIPat(Op1);
7571     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7572       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7573                          DAG.getConstant(LHS->ShAmt, DL, VT));
7574     }
7575   }
7576   return SDValue();
7577 }
7578 
7579 // Matches any of the following bit-manipulation patterns:
7580 //   (and (shl x, 1), (0x22222222 << 1))
7581 //   (and (srl x, 1), 0x22222222)
7582 //   (shl (and x, 0x22222222), 1)
7583 //   (srl (and x, (0x22222222 << 1)), 1)
7584 // where the shift amount and mask may vary thus:
7585 //   [1]  = 0x22222222 / 0x44444444
7586 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7587 //   [4]  = 0x00F000F0 / 0x0F000F00
7588 //   [8]  = 0x0000FF00 / 0x00FF0000
7589 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7590 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7591   // These are the unshifted masks which we use to match bit-manipulation
7592   // patterns. They may be shifted left in certain circumstances.
7593   static const uint64_t BitmanipMasks[] = {
7594       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7595       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7596 
7597   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7598 }
7599 
7600 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7601 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7602                                const RISCVSubtarget &Subtarget) {
7603   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7604   EVT VT = Op.getValueType();
7605 
7606   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7607     return SDValue();
7608 
7609   SDValue Op0 = Op.getOperand(0);
7610   SDValue Op1 = Op.getOperand(1);
7611 
7612   // Or is commutable so canonicalize the second OR to the LHS.
7613   if (Op0.getOpcode() != ISD::OR)
7614     std::swap(Op0, Op1);
7615   if (Op0.getOpcode() != ISD::OR)
7616     return SDValue();
7617 
7618   // We found an inner OR, so our operands are the operands of the inner OR
7619   // and the other operand of the outer OR.
7620   SDValue A = Op0.getOperand(0);
7621   SDValue B = Op0.getOperand(1);
7622   SDValue C = Op1;
7623 
7624   auto Match1 = matchSHFLPat(A);
7625   auto Match2 = matchSHFLPat(B);
7626 
7627   // If neither matched, we failed.
7628   if (!Match1 && !Match2)
7629     return SDValue();
7630 
7631   // We had at least one match. if one failed, try the remaining C operand.
7632   if (!Match1) {
7633     std::swap(A, C);
7634     Match1 = matchSHFLPat(A);
7635     if (!Match1)
7636       return SDValue();
7637   } else if (!Match2) {
7638     std::swap(B, C);
7639     Match2 = matchSHFLPat(B);
7640     if (!Match2)
7641       return SDValue();
7642   }
7643   assert(Match1 && Match2);
7644 
7645   // Make sure our matches pair up.
7646   if (!Match1->formsPairWith(*Match2))
7647     return SDValue();
7648 
7649   // All the remains is to make sure C is an AND with the same input, that masks
7650   // out the bits that are being shuffled.
7651   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7652       C.getOperand(0) != Match1->Op)
7653     return SDValue();
7654 
7655   uint64_t Mask = C.getConstantOperandVal(1);
7656 
7657   static const uint64_t BitmanipMasks[] = {
7658       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7659       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7660   };
7661 
7662   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7663   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7664   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7665 
7666   if (Mask != ExpMask)
7667     return SDValue();
7668 
7669   SDLoc DL(Op);
7670   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7671                      DAG.getConstant(Match1->ShAmt, DL, VT));
7672 }
7673 
7674 // Optimize (add (shl x, c0), (shl y, c1)) ->
7675 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7676 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7677                                   const RISCVSubtarget &Subtarget) {
7678   // Perform this optimization only in the zba extension.
7679   if (!Subtarget.hasStdExtZba())
7680     return SDValue();
7681 
7682   // Skip for vector types and larger types.
7683   EVT VT = N->getValueType(0);
7684   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7685     return SDValue();
7686 
7687   // The two operand nodes must be SHL and have no other use.
7688   SDValue N0 = N->getOperand(0);
7689   SDValue N1 = N->getOperand(1);
7690   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7691       !N0->hasOneUse() || !N1->hasOneUse())
7692     return SDValue();
7693 
7694   // Check c0 and c1.
7695   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7696   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7697   if (!N0C || !N1C)
7698     return SDValue();
7699   int64_t C0 = N0C->getSExtValue();
7700   int64_t C1 = N1C->getSExtValue();
7701   if (C0 <= 0 || C1 <= 0)
7702     return SDValue();
7703 
7704   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7705   int64_t Bits = std::min(C0, C1);
7706   int64_t Diff = std::abs(C0 - C1);
7707   if (Diff != 1 && Diff != 2 && Diff != 3)
7708     return SDValue();
7709 
7710   // Build nodes.
7711   SDLoc DL(N);
7712   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7713   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7714   SDValue NA0 =
7715       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7716   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7717   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7718 }
7719 
7720 // Combine
7721 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7722 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7723 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7724 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7725 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7726 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7727 // The grev patterns represents BSWAP.
7728 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7729 // off the grev.
7730 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7731                                           const RISCVSubtarget &Subtarget) {
7732   bool IsWInstruction =
7733       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7734   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7735           IsWInstruction) &&
7736          "Unexpected opcode!");
7737   SDValue Src = N->getOperand(0);
7738   EVT VT = N->getValueType(0);
7739   SDLoc DL(N);
7740 
7741   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7742     return SDValue();
7743 
7744   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7745       !isa<ConstantSDNode>(Src.getOperand(1)))
7746     return SDValue();
7747 
7748   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7749   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7750 
7751   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7752   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7753   unsigned ShAmt1 = N->getConstantOperandVal(1);
7754   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7755   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7756     return SDValue();
7757 
7758   Src = Src.getOperand(0);
7759 
7760   // Toggle bit the MSB of the shift.
7761   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7762   if (CombinedShAmt == 0)
7763     return Src;
7764 
7765   SDValue Res = DAG.getNode(
7766       RISCVISD::GREV, DL, VT, Src,
7767       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7768   if (!IsWInstruction)
7769     return Res;
7770 
7771   // Sign extend the result to match the behavior of the rotate. This will be
7772   // selected to GREVIW in isel.
7773   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7774                      DAG.getValueType(MVT::i32));
7775 }
7776 
7777 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7778 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7779 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7780 // not undo itself, but they are redundant.
7781 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7782   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7783   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7784   SDValue Src = N->getOperand(0);
7785 
7786   if (Src.getOpcode() != N->getOpcode())
7787     return SDValue();
7788 
7789   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7790       !isa<ConstantSDNode>(Src.getOperand(1)))
7791     return SDValue();
7792 
7793   unsigned ShAmt1 = N->getConstantOperandVal(1);
7794   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7795   Src = Src.getOperand(0);
7796 
7797   unsigned CombinedShAmt;
7798   if (IsGORC)
7799     CombinedShAmt = ShAmt1 | ShAmt2;
7800   else
7801     CombinedShAmt = ShAmt1 ^ ShAmt2;
7802 
7803   if (CombinedShAmt == 0)
7804     return Src;
7805 
7806   SDLoc DL(N);
7807   return DAG.getNode(
7808       N->getOpcode(), DL, N->getValueType(0), Src,
7809       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7810 }
7811 
7812 // Combine a constant select operand into its use:
7813 //
7814 // (and (select cond, -1, c), x)
7815 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7816 // (or  (select cond, 0, c), x)
7817 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7818 // (xor (select cond, 0, c), x)
7819 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7820 // (add (select cond, 0, c), x)
7821 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7822 // (sub x, (select cond, 0, c))
7823 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7824 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7825                                    SelectionDAG &DAG, bool AllOnes) {
7826   EVT VT = N->getValueType(0);
7827 
7828   // Skip vectors.
7829   if (VT.isVector())
7830     return SDValue();
7831 
7832   if ((Slct.getOpcode() != ISD::SELECT &&
7833        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7834       !Slct.hasOneUse())
7835     return SDValue();
7836 
7837   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7838     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7839   };
7840 
7841   bool SwapSelectOps;
7842   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7843   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7844   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7845   SDValue NonConstantVal;
7846   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7847     SwapSelectOps = false;
7848     NonConstantVal = FalseVal;
7849   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7850     SwapSelectOps = true;
7851     NonConstantVal = TrueVal;
7852   } else
7853     return SDValue();
7854 
7855   // Slct is now know to be the desired identity constant when CC is true.
7856   TrueVal = OtherOp;
7857   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7858   // Unless SwapSelectOps says the condition should be false.
7859   if (SwapSelectOps)
7860     std::swap(TrueVal, FalseVal);
7861 
7862   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7863     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7864                        {Slct.getOperand(0), Slct.getOperand(1),
7865                         Slct.getOperand(2), TrueVal, FalseVal});
7866 
7867   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7868                      {Slct.getOperand(0), TrueVal, FalseVal});
7869 }
7870 
7871 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7872 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7873                                               bool AllOnes) {
7874   SDValue N0 = N->getOperand(0);
7875   SDValue N1 = N->getOperand(1);
7876   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7877     return Result;
7878   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7879     return Result;
7880   return SDValue();
7881 }
7882 
7883 // Transform (add (mul x, c0), c1) ->
7884 //           (add (mul (add x, c1/c0), c0), c1%c0).
7885 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7886 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7887 // to an infinite loop in DAGCombine if transformed.
7888 // Or transform (add (mul x, c0), c1) ->
7889 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7890 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7891 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7892 // lead to an infinite loop in DAGCombine if transformed.
7893 // Or transform (add (mul x, c0), c1) ->
7894 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7895 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7896 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7897 // lead to an infinite loop in DAGCombine if transformed.
7898 // Or transform (add (mul x, c0), c1) ->
7899 //              (mul (add x, c1/c0), c0).
7900 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7901 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7902                                      const RISCVSubtarget &Subtarget) {
7903   // Skip for vector types and larger types.
7904   EVT VT = N->getValueType(0);
7905   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7906     return SDValue();
7907   // The first operand node must be a MUL and has no other use.
7908   SDValue N0 = N->getOperand(0);
7909   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7910     return SDValue();
7911   // Check if c0 and c1 match above conditions.
7912   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7913   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7914   if (!N0C || !N1C)
7915     return SDValue();
7916   // If N0C has multiple uses it's possible one of the cases in
7917   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7918   // in an infinite loop.
7919   if (!N0C->hasOneUse())
7920     return SDValue();
7921   int64_t C0 = N0C->getSExtValue();
7922   int64_t C1 = N1C->getSExtValue();
7923   int64_t CA, CB;
7924   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7925     return SDValue();
7926   // Search for proper CA (non-zero) and CB that both are simm12.
7927   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7928       !isInt<12>(C0 * (C1 / C0))) {
7929     CA = C1 / C0;
7930     CB = C1 % C0;
7931   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7932              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7933     CA = C1 / C0 + 1;
7934     CB = C1 % C0 - C0;
7935   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7936              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7937     CA = C1 / C0 - 1;
7938     CB = C1 % C0 + C0;
7939   } else
7940     return SDValue();
7941   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7942   SDLoc DL(N);
7943   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7944                              DAG.getConstant(CA, DL, VT));
7945   SDValue New1 =
7946       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7947   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7948 }
7949 
7950 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7951                                  const RISCVSubtarget &Subtarget) {
7952   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7953     return V;
7954   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7955     return V;
7956   if (SDValue V = combineBinOpToReduce(N, DAG))
7957     return V;
7958   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7959   //      (select lhs, rhs, cc, x, (add x, y))
7960   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7961 }
7962 
7963 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7964   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7965   //      (select lhs, rhs, cc, x, (sub x, y))
7966   SDValue N0 = N->getOperand(0);
7967   SDValue N1 = N->getOperand(1);
7968   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7969 }
7970 
7971 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
7972                                  const RISCVSubtarget &Subtarget) {
7973   SDValue N0 = N->getOperand(0);
7974   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
7975   // extending X. This is safe since we only need the LSB after the shift and
7976   // shift amounts larger than 31 would produce poison. If we wait until
7977   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
7978   // to use a BEXT instruction.
7979   if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
7980       N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
7981       N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
7982       N0.hasOneUse()) {
7983     SDLoc DL(N);
7984     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
7985     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
7986     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
7987     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
7988                               DAG.getConstant(1, DL, MVT::i64));
7989     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
7990   }
7991 
7992   if (SDValue V = combineBinOpToReduce(N, DAG))
7993     return V;
7994 
7995   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7996   //      (select lhs, rhs, cc, x, (and x, y))
7997   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7998 }
7999 
8000 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
8001                                 const RISCVSubtarget &Subtarget) {
8002   if (Subtarget.hasStdExtZbp()) {
8003     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
8004       return GREV;
8005     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
8006       return GORC;
8007     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
8008       return SHFL;
8009   }
8010 
8011   if (SDValue V = combineBinOpToReduce(N, DAG))
8012     return V;
8013   // fold (or (select cond, 0, y), x) ->
8014   //      (select cond, x, (or x, y))
8015   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8016 }
8017 
8018 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
8019   SDValue N0 = N->getOperand(0);
8020   SDValue N1 = N->getOperand(1);
8021 
8022   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
8023   // NOTE: Assumes ROL being legal means ROLW is legal.
8024   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8025   if (N0.getOpcode() == RISCVISD::SLLW &&
8026       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
8027       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
8028     SDLoc DL(N);
8029     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
8030                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
8031   }
8032 
8033   if (SDValue V = combineBinOpToReduce(N, DAG))
8034     return V;
8035   // fold (xor (select cond, 0, y), x) ->
8036   //      (select cond, x, (xor x, y))
8037   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8038 }
8039 
8040 static SDValue
8041 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
8042                                 const RISCVSubtarget &Subtarget) {
8043   SDValue Src = N->getOperand(0);
8044   EVT VT = N->getValueType(0);
8045 
8046   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8047   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8048       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8049     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8050                        Src.getOperand(0));
8051 
8052   // Fold (i64 (sext_inreg (abs X), i32)) ->
8053   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8054   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8055   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8056   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8057   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8058   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8059   // may get combined into an earlier operation so we need to use
8060   // ComputeNumSignBits.
8061   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8062   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8063   // we can't assume that X has 33 sign bits. We must check.
8064   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8065       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8066       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8067       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8068     SDLoc DL(N);
8069     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8070     SDValue Neg =
8071         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8072     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8073                       DAG.getValueType(MVT::i32));
8074     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8075   }
8076 
8077   return SDValue();
8078 }
8079 
8080 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8081 // vwadd(u).vv/vx or vwsub(u).vv/vx.
8082 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8083                                              bool Commute = false) {
8084   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8085           N->getOpcode() == RISCVISD::SUB_VL) &&
8086          "Unexpected opcode");
8087   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8088   SDValue Op0 = N->getOperand(0);
8089   SDValue Op1 = N->getOperand(1);
8090   if (Commute)
8091     std::swap(Op0, Op1);
8092 
8093   MVT VT = N->getSimpleValueType(0);
8094 
8095   // Determine the narrow size for a widening add/sub.
8096   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8097   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8098                                   VT.getVectorElementCount());
8099 
8100   SDValue Mask = N->getOperand(2);
8101   SDValue VL = N->getOperand(3);
8102 
8103   SDLoc DL(N);
8104 
8105   // If the RHS is a sext or zext, we can form a widening op.
8106   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8107        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8108       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8109     unsigned ExtOpc = Op1.getOpcode();
8110     Op1 = Op1.getOperand(0);
8111     // Re-introduce narrower extends if needed.
8112     if (Op1.getValueType() != NarrowVT)
8113       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8114 
8115     unsigned WOpc;
8116     if (ExtOpc == RISCVISD::VSEXT_VL)
8117       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8118     else
8119       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8120 
8121     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8122   }
8123 
8124   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8125   // sext/zext?
8126 
8127   return SDValue();
8128 }
8129 
8130 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8131 // vwsub(u).vv/vx.
8132 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8133   SDValue Op0 = N->getOperand(0);
8134   SDValue Op1 = N->getOperand(1);
8135   SDValue Mask = N->getOperand(2);
8136   SDValue VL = N->getOperand(3);
8137 
8138   MVT VT = N->getSimpleValueType(0);
8139   MVT NarrowVT = Op1.getSimpleValueType();
8140   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8141 
8142   unsigned VOpc;
8143   switch (N->getOpcode()) {
8144   default: llvm_unreachable("Unexpected opcode");
8145   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8146   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8147   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8148   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8149   }
8150 
8151   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8152                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8153 
8154   SDLoc DL(N);
8155 
8156   // If the LHS is a sext or zext, we can narrow this op to the same size as
8157   // the RHS.
8158   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8159        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8160       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8161     unsigned ExtOpc = Op0.getOpcode();
8162     Op0 = Op0.getOperand(0);
8163     // Re-introduce narrower extends if needed.
8164     if (Op0.getValueType() != NarrowVT)
8165       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8166     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8167   }
8168 
8169   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8170                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8171 
8172   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8173   // to commute and use a vwadd(u).vx instead.
8174   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8175       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8176     Op0 = Op0.getOperand(1);
8177 
8178     // See if have enough sign bits or zero bits in the scalar to use a
8179     // widening add/sub by splatting to smaller element size.
8180     unsigned EltBits = VT.getScalarSizeInBits();
8181     unsigned ScalarBits = Op0.getValueSizeInBits();
8182     // Make sure we're getting all element bits from the scalar register.
8183     // FIXME: Support implicit sign extension of vmv.v.x?
8184     if (ScalarBits < EltBits)
8185       return SDValue();
8186 
8187     if (IsSigned) {
8188       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8189         return SDValue();
8190     } else {
8191       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8192       if (!DAG.MaskedValueIsZero(Op0, Mask))
8193         return SDValue();
8194     }
8195 
8196     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8197                       DAG.getUNDEF(NarrowVT), Op0, VL);
8198     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8199   }
8200 
8201   return SDValue();
8202 }
8203 
8204 // Try to form VWMUL, VWMULU or VWMULSU.
8205 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
8206 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8207                                        bool Commute) {
8208   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8209   SDValue Op0 = N->getOperand(0);
8210   SDValue Op1 = N->getOperand(1);
8211   if (Commute)
8212     std::swap(Op0, Op1);
8213 
8214   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8215   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8216   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8217   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8218     return SDValue();
8219 
8220   SDValue Mask = N->getOperand(2);
8221   SDValue VL = N->getOperand(3);
8222 
8223   // Make sure the mask and VL match.
8224   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8225     return SDValue();
8226 
8227   MVT VT = N->getSimpleValueType(0);
8228 
8229   // Determine the narrow size for a widening multiply.
8230   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8231   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8232                                   VT.getVectorElementCount());
8233 
8234   SDLoc DL(N);
8235 
8236   // See if the other operand is the same opcode.
8237   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8238     if (!Op1.hasOneUse())
8239       return SDValue();
8240 
8241     // Make sure the mask and VL match.
8242     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8243       return SDValue();
8244 
8245     Op1 = Op1.getOperand(0);
8246   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8247     // The operand is a splat of a scalar.
8248 
8249     // The pasthru must be undef for tail agnostic
8250     if (!Op1.getOperand(0).isUndef())
8251       return SDValue();
8252     // The VL must be the same.
8253     if (Op1.getOperand(2) != VL)
8254       return SDValue();
8255 
8256     // Get the scalar value.
8257     Op1 = Op1.getOperand(1);
8258 
8259     // See if have enough sign bits or zero bits in the scalar to use a
8260     // widening multiply by splatting to smaller element size.
8261     unsigned EltBits = VT.getScalarSizeInBits();
8262     unsigned ScalarBits = Op1.getValueSizeInBits();
8263     // Make sure we're getting all element bits from the scalar register.
8264     // FIXME: Support implicit sign extension of vmv.v.x?
8265     if (ScalarBits < EltBits)
8266       return SDValue();
8267 
8268     // If the LHS is a sign extend, try to use vwmul.
8269     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8270       // Can use vwmul.
8271     } else {
8272       // Otherwise try to use vwmulu or vwmulsu.
8273       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8274       if (DAG.MaskedValueIsZero(Op1, Mask))
8275         IsVWMULSU = IsSignExt;
8276       else
8277         return SDValue();
8278     }
8279 
8280     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8281                       DAG.getUNDEF(NarrowVT), Op1, VL);
8282   } else
8283     return SDValue();
8284 
8285   Op0 = Op0.getOperand(0);
8286 
8287   // Re-introduce narrower extends if needed.
8288   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8289   if (Op0.getValueType() != NarrowVT)
8290     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8291   // vwmulsu requires second operand to be zero extended.
8292   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8293   if (Op1.getValueType() != NarrowVT)
8294     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8295 
8296   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8297   if (!IsVWMULSU)
8298     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8299   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8300 }
8301 
8302 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8303   switch (Op.getOpcode()) {
8304   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8305   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8306   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8307   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8308   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8309   }
8310 
8311   return RISCVFPRndMode::Invalid;
8312 }
8313 
8314 // Fold
8315 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8316 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8317 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8318 //   (fp_to_int (fceil X))      -> fcvt X, rup
8319 //   (fp_to_int (fround X))     -> fcvt X, rmm
8320 static SDValue performFP_TO_INTCombine(SDNode *N,
8321                                        TargetLowering::DAGCombinerInfo &DCI,
8322                                        const RISCVSubtarget &Subtarget) {
8323   SelectionDAG &DAG = DCI.DAG;
8324   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8325   MVT XLenVT = Subtarget.getXLenVT();
8326 
8327   // Only handle XLen or i32 types. Other types narrower than XLen will
8328   // eventually be legalized to XLenVT.
8329   EVT VT = N->getValueType(0);
8330   if (VT != MVT::i32 && VT != XLenVT)
8331     return SDValue();
8332 
8333   SDValue Src = N->getOperand(0);
8334 
8335   // Ensure the FP type is also legal.
8336   if (!TLI.isTypeLegal(Src.getValueType()))
8337     return SDValue();
8338 
8339   // Don't do this for f16 with Zfhmin and not Zfh.
8340   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8341     return SDValue();
8342 
8343   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8344   if (FRM == RISCVFPRndMode::Invalid)
8345     return SDValue();
8346 
8347   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8348 
8349   unsigned Opc;
8350   if (VT == XLenVT)
8351     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8352   else
8353     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8354 
8355   SDLoc DL(N);
8356   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8357                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8358   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8359 }
8360 
8361 // Fold
8362 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8363 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8364 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8365 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8366 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8367 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8368                                        TargetLowering::DAGCombinerInfo &DCI,
8369                                        const RISCVSubtarget &Subtarget) {
8370   SelectionDAG &DAG = DCI.DAG;
8371   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8372   MVT XLenVT = Subtarget.getXLenVT();
8373 
8374   // Only handle XLen types. Other types narrower than XLen will eventually be
8375   // legalized to XLenVT.
8376   EVT DstVT = N->getValueType(0);
8377   if (DstVT != XLenVT)
8378     return SDValue();
8379 
8380   SDValue Src = N->getOperand(0);
8381 
8382   // Ensure the FP type is also legal.
8383   if (!TLI.isTypeLegal(Src.getValueType()))
8384     return SDValue();
8385 
8386   // Don't do this for f16 with Zfhmin and not Zfh.
8387   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8388     return SDValue();
8389 
8390   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8391 
8392   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8393   if (FRM == RISCVFPRndMode::Invalid)
8394     return SDValue();
8395 
8396   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8397 
8398   unsigned Opc;
8399   if (SatVT == DstVT)
8400     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8401   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8402     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8403   else
8404     return SDValue();
8405   // FIXME: Support other SatVTs by clamping before or after the conversion.
8406 
8407   Src = Src.getOperand(0);
8408 
8409   SDLoc DL(N);
8410   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8411                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8412 
8413   // RISCV FP-to-int conversions saturate to the destination register size, but
8414   // don't produce 0 for nan.
8415   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8416   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8417 }
8418 
8419 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8420 // smaller than XLenVT.
8421 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8422                                         const RISCVSubtarget &Subtarget) {
8423   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8424 
8425   SDValue Src = N->getOperand(0);
8426   if (Src.getOpcode() != ISD::BSWAP)
8427     return SDValue();
8428 
8429   EVT VT = N->getValueType(0);
8430   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8431       !isPowerOf2_32(VT.getSizeInBits()))
8432     return SDValue();
8433 
8434   SDLoc DL(N);
8435   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8436                      DAG.getConstant(7, DL, VT));
8437 }
8438 
8439 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8440                                                DAGCombinerInfo &DCI) const {
8441   SelectionDAG &DAG = DCI.DAG;
8442 
8443   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8444   // bits are demanded. N will be added to the Worklist if it was not deleted.
8445   // Caller should return SDValue(N, 0) if this returns true.
8446   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8447     SDValue Op = N->getOperand(OpNo);
8448     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8449     if (!SimplifyDemandedBits(Op, Mask, DCI))
8450       return false;
8451 
8452     if (N->getOpcode() != ISD::DELETED_NODE)
8453       DCI.AddToWorklist(N);
8454     return true;
8455   };
8456 
8457   switch (N->getOpcode()) {
8458   default:
8459     break;
8460   case RISCVISD::SplitF64: {
8461     SDValue Op0 = N->getOperand(0);
8462     // If the input to SplitF64 is just BuildPairF64 then the operation is
8463     // redundant. Instead, use BuildPairF64's operands directly.
8464     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8465       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8466 
8467     if (Op0->isUndef()) {
8468       SDValue Lo = DAG.getUNDEF(MVT::i32);
8469       SDValue Hi = DAG.getUNDEF(MVT::i32);
8470       return DCI.CombineTo(N, Lo, Hi);
8471     }
8472 
8473     SDLoc DL(N);
8474 
8475     // It's cheaper to materialise two 32-bit integers than to load a double
8476     // from the constant pool and transfer it to integer registers through the
8477     // stack.
8478     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8479       APInt V = C->getValueAPF().bitcastToAPInt();
8480       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8481       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8482       return DCI.CombineTo(N, Lo, Hi);
8483     }
8484 
8485     // This is a target-specific version of a DAGCombine performed in
8486     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8487     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8488     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8489     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8490         !Op0.getNode()->hasOneUse())
8491       break;
8492     SDValue NewSplitF64 =
8493         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8494                     Op0.getOperand(0));
8495     SDValue Lo = NewSplitF64.getValue(0);
8496     SDValue Hi = NewSplitF64.getValue(1);
8497     APInt SignBit = APInt::getSignMask(32);
8498     if (Op0.getOpcode() == ISD::FNEG) {
8499       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8500                                   DAG.getConstant(SignBit, DL, MVT::i32));
8501       return DCI.CombineTo(N, Lo, NewHi);
8502     }
8503     assert(Op0.getOpcode() == ISD::FABS);
8504     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8505                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8506     return DCI.CombineTo(N, Lo, NewHi);
8507   }
8508   case RISCVISD::SLLW:
8509   case RISCVISD::SRAW:
8510   case RISCVISD::SRLW: {
8511     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8512     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8513         SimplifyDemandedLowBitsHelper(1, 5))
8514       return SDValue(N, 0);
8515 
8516     break;
8517   }
8518   case ISD::ROTR:
8519   case ISD::ROTL:
8520   case RISCVISD::RORW:
8521   case RISCVISD::ROLW: {
8522     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8523       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8524       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8525           SimplifyDemandedLowBitsHelper(1, 5))
8526         return SDValue(N, 0);
8527     }
8528 
8529     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8530   }
8531   case RISCVISD::CLZW:
8532   case RISCVISD::CTZW: {
8533     // Only the lower 32 bits of the first operand are read
8534     if (SimplifyDemandedLowBitsHelper(0, 32))
8535       return SDValue(N, 0);
8536     break;
8537   }
8538   case RISCVISD::GREV:
8539   case RISCVISD::GORC: {
8540     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8541     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8542     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8543     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8544       return SDValue(N, 0);
8545 
8546     return combineGREVI_GORCI(N, DAG);
8547   }
8548   case RISCVISD::GREVW:
8549   case RISCVISD::GORCW: {
8550     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8551     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8552         SimplifyDemandedLowBitsHelper(1, 5))
8553       return SDValue(N, 0);
8554 
8555     break;
8556   }
8557   case RISCVISD::SHFL:
8558   case RISCVISD::UNSHFL: {
8559     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8560     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8561     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8562     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8563       return SDValue(N, 0);
8564 
8565     break;
8566   }
8567   case RISCVISD::SHFLW:
8568   case RISCVISD::UNSHFLW: {
8569     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8570     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8571         SimplifyDemandedLowBitsHelper(1, 4))
8572       return SDValue(N, 0);
8573 
8574     break;
8575   }
8576   case RISCVISD::BCOMPRESSW:
8577   case RISCVISD::BDECOMPRESSW: {
8578     // Only the lower 32 bits of LHS and RHS are read.
8579     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8580         SimplifyDemandedLowBitsHelper(1, 32))
8581       return SDValue(N, 0);
8582 
8583     break;
8584   }
8585   case RISCVISD::FSR:
8586   case RISCVISD::FSL:
8587   case RISCVISD::FSRW:
8588   case RISCVISD::FSLW: {
8589     bool IsWInstruction =
8590         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8591     unsigned BitWidth =
8592         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8593     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8594     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8595     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8596       return SDValue(N, 0);
8597 
8598     break;
8599   }
8600   case RISCVISD::FMV_X_ANYEXTH:
8601   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8602     SDLoc DL(N);
8603     SDValue Op0 = N->getOperand(0);
8604     MVT VT = N->getSimpleValueType(0);
8605     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8606     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8607     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8608     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8609          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8610         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8611          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8612       assert(Op0.getOperand(0).getValueType() == VT &&
8613              "Unexpected value type!");
8614       return Op0.getOperand(0);
8615     }
8616 
8617     // This is a target-specific version of a DAGCombine performed in
8618     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8619     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8620     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8621     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8622         !Op0.getNode()->hasOneUse())
8623       break;
8624     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8625     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8626     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8627     if (Op0.getOpcode() == ISD::FNEG)
8628       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8629                          DAG.getConstant(SignBit, DL, VT));
8630 
8631     assert(Op0.getOpcode() == ISD::FABS);
8632     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8633                        DAG.getConstant(~SignBit, DL, VT));
8634   }
8635   case ISD::ADD:
8636     return performADDCombine(N, DAG, Subtarget);
8637   case ISD::SUB:
8638     return performSUBCombine(N, DAG);
8639   case ISD::AND:
8640     return performANDCombine(N, DAG, Subtarget);
8641   case ISD::OR:
8642     return performORCombine(N, DAG, Subtarget);
8643   case ISD::XOR:
8644     return performXORCombine(N, DAG);
8645   case ISD::FADD:
8646   case ISD::UMAX:
8647   case ISD::UMIN:
8648   case ISD::SMAX:
8649   case ISD::SMIN:
8650   case ISD::FMAXNUM:
8651   case ISD::FMINNUM:
8652     return combineBinOpToReduce(N, DAG);
8653   case ISD::SIGN_EXTEND_INREG:
8654     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8655   case ISD::ZERO_EXTEND:
8656     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8657     // type legalization. This is safe because fp_to_uint produces poison if
8658     // it overflows.
8659     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8660       SDValue Src = N->getOperand(0);
8661       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8662           isTypeLegal(Src.getOperand(0).getValueType()))
8663         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8664                            Src.getOperand(0));
8665       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8666           isTypeLegal(Src.getOperand(1).getValueType())) {
8667         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8668         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8669                                   Src.getOperand(0), Src.getOperand(1));
8670         DCI.CombineTo(N, Res);
8671         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8672         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8673         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8674       }
8675     }
8676     return SDValue();
8677   case RISCVISD::SELECT_CC: {
8678     // Transform
8679     SDValue LHS = N->getOperand(0);
8680     SDValue RHS = N->getOperand(1);
8681     SDValue TrueV = N->getOperand(3);
8682     SDValue FalseV = N->getOperand(4);
8683 
8684     // If the True and False values are the same, we don't need a select_cc.
8685     if (TrueV == FalseV)
8686       return TrueV;
8687 
8688     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8689     if (!ISD::isIntEqualitySetCC(CCVal))
8690       break;
8691 
8692     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8693     //      (select_cc X, Y, lt, trueV, falseV)
8694     // Sometimes the setcc is introduced after select_cc has been formed.
8695     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8696         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8697       // If we're looking for eq 0 instead of ne 0, we need to invert the
8698       // condition.
8699       bool Invert = CCVal == ISD::SETEQ;
8700       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8701       if (Invert)
8702         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8703 
8704       SDLoc DL(N);
8705       RHS = LHS.getOperand(1);
8706       LHS = LHS.getOperand(0);
8707       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8708 
8709       SDValue TargetCC = DAG.getCondCode(CCVal);
8710       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8711                          {LHS, RHS, TargetCC, TrueV, FalseV});
8712     }
8713 
8714     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8715     //      (select_cc X, Y, eq/ne, trueV, falseV)
8716     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8717       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8718                          {LHS.getOperand(0), LHS.getOperand(1),
8719                           N->getOperand(2), TrueV, FalseV});
8720     // (select_cc X, 1, setne, trueV, falseV) ->
8721     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8722     // This can occur when legalizing some floating point comparisons.
8723     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8724     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8725       SDLoc DL(N);
8726       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8727       SDValue TargetCC = DAG.getCondCode(CCVal);
8728       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8729       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8730                          {LHS, RHS, TargetCC, TrueV, FalseV});
8731     }
8732 
8733     break;
8734   }
8735   case RISCVISD::BR_CC: {
8736     SDValue LHS = N->getOperand(1);
8737     SDValue RHS = N->getOperand(2);
8738     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8739     if (!ISD::isIntEqualitySetCC(CCVal))
8740       break;
8741 
8742     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8743     //      (br_cc X, Y, lt, dest)
8744     // Sometimes the setcc is introduced after br_cc has been formed.
8745     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8746         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8747       // If we're looking for eq 0 instead of ne 0, we need to invert the
8748       // condition.
8749       bool Invert = CCVal == ISD::SETEQ;
8750       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8751       if (Invert)
8752         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8753 
8754       SDLoc DL(N);
8755       RHS = LHS.getOperand(1);
8756       LHS = LHS.getOperand(0);
8757       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8758 
8759       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8760                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8761                          N->getOperand(4));
8762     }
8763 
8764     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8765     //      (br_cc X, Y, eq/ne, trueV, falseV)
8766     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8767       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8768                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8769                          N->getOperand(3), N->getOperand(4));
8770 
8771     // (br_cc X, 1, setne, br_cc) ->
8772     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8773     // This can occur when legalizing some floating point comparisons.
8774     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8775     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8776       SDLoc DL(N);
8777       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8778       SDValue TargetCC = DAG.getCondCode(CCVal);
8779       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8780       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8781                          N->getOperand(0), LHS, RHS, TargetCC,
8782                          N->getOperand(4));
8783     }
8784     break;
8785   }
8786   case ISD::BITREVERSE:
8787     return performBITREVERSECombine(N, DAG, Subtarget);
8788   case ISD::FP_TO_SINT:
8789   case ISD::FP_TO_UINT:
8790     return performFP_TO_INTCombine(N, DCI, Subtarget);
8791   case ISD::FP_TO_SINT_SAT:
8792   case ISD::FP_TO_UINT_SAT:
8793     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8794   case ISD::FCOPYSIGN: {
8795     EVT VT = N->getValueType(0);
8796     if (!VT.isVector())
8797       break;
8798     // There is a form of VFSGNJ which injects the negated sign of its second
8799     // operand. Try and bubble any FNEG up after the extend/round to produce
8800     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8801     // TRUNC=1.
8802     SDValue In2 = N->getOperand(1);
8803     // Avoid cases where the extend/round has multiple uses, as duplicating
8804     // those is typically more expensive than removing a fneg.
8805     if (!In2.hasOneUse())
8806       break;
8807     if (In2.getOpcode() != ISD::FP_EXTEND &&
8808         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8809       break;
8810     In2 = In2.getOperand(0);
8811     if (In2.getOpcode() != ISD::FNEG)
8812       break;
8813     SDLoc DL(N);
8814     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8815     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8816                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8817   }
8818   case ISD::MGATHER:
8819   case ISD::MSCATTER:
8820   case ISD::VP_GATHER:
8821   case ISD::VP_SCATTER: {
8822     if (!DCI.isBeforeLegalize())
8823       break;
8824     SDValue Index, ScaleOp;
8825     bool IsIndexScaled = false;
8826     bool IsIndexSigned = false;
8827     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8828       Index = VPGSN->getIndex();
8829       ScaleOp = VPGSN->getScale();
8830       IsIndexScaled = VPGSN->isIndexScaled();
8831       IsIndexSigned = VPGSN->isIndexSigned();
8832     } else {
8833       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8834       Index = MGSN->getIndex();
8835       ScaleOp = MGSN->getScale();
8836       IsIndexScaled = MGSN->isIndexScaled();
8837       IsIndexSigned = MGSN->isIndexSigned();
8838     }
8839     EVT IndexVT = Index.getValueType();
8840     MVT XLenVT = Subtarget.getXLenVT();
8841     // RISCV indexed loads only support the "unsigned unscaled" addressing
8842     // mode, so anything else must be manually legalized.
8843     bool NeedsIdxLegalization =
8844         IsIndexScaled ||
8845         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8846     if (!NeedsIdxLegalization)
8847       break;
8848 
8849     SDLoc DL(N);
8850 
8851     // Any index legalization should first promote to XLenVT, so we don't lose
8852     // bits when scaling. This may create an illegal index type so we let
8853     // LLVM's legalization take care of the splitting.
8854     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8855     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8856       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8857       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8858                           DL, IndexVT, Index);
8859     }
8860 
8861     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8862     if (IsIndexScaled && Scale != 1) {
8863       // Manually scale the indices by the element size.
8864       // TODO: Sanitize the scale operand here?
8865       // TODO: For VP nodes, should we use VP_SHL here?
8866       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8867       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8868       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8869     }
8870 
8871     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8872     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8873       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8874                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8875                               VPGN->getScale(), VPGN->getMask(),
8876                               VPGN->getVectorLength()},
8877                              VPGN->getMemOperand(), NewIndexTy);
8878     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8879       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8880                               {VPSN->getChain(), VPSN->getValue(),
8881                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8882                                VPSN->getMask(), VPSN->getVectorLength()},
8883                               VPSN->getMemOperand(), NewIndexTy);
8884     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8885       return DAG.getMaskedGather(
8886           N->getVTList(), MGN->getMemoryVT(), DL,
8887           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8888            MGN->getBasePtr(), Index, MGN->getScale()},
8889           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8890     const auto *MSN = cast<MaskedScatterSDNode>(N);
8891     return DAG.getMaskedScatter(
8892         N->getVTList(), MSN->getMemoryVT(), DL,
8893         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8894          Index, MSN->getScale()},
8895         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8896   }
8897   case RISCVISD::SRA_VL:
8898   case RISCVISD::SRL_VL:
8899   case RISCVISD::SHL_VL: {
8900     SDValue ShAmt = N->getOperand(1);
8901     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8902       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8903       SDLoc DL(N);
8904       SDValue VL = N->getOperand(3);
8905       EVT VT = N->getValueType(0);
8906       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8907                           ShAmt.getOperand(1), VL);
8908       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8909                          N->getOperand(2), N->getOperand(3));
8910     }
8911     break;
8912   }
8913   case ISD::SRA:
8914   case ISD::SRL:
8915   case ISD::SHL: {
8916     SDValue ShAmt = N->getOperand(1);
8917     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8918       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8919       SDLoc DL(N);
8920       EVT VT = N->getValueType(0);
8921       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8922                           ShAmt.getOperand(1),
8923                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8924       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8925     }
8926     break;
8927   }
8928   case RISCVISD::ADD_VL:
8929     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8930       return V;
8931     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8932   case RISCVISD::SUB_VL:
8933     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8934   case RISCVISD::VWADD_W_VL:
8935   case RISCVISD::VWADDU_W_VL:
8936   case RISCVISD::VWSUB_W_VL:
8937   case RISCVISD::VWSUBU_W_VL:
8938     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8939   case RISCVISD::MUL_VL:
8940     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8941       return V;
8942     // Mul is commutative.
8943     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8944   case ISD::STORE: {
8945     auto *Store = cast<StoreSDNode>(N);
8946     SDValue Val = Store->getValue();
8947     // Combine store of vmv.x.s to vse with VL of 1.
8948     // FIXME: Support FP.
8949     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8950       SDValue Src = Val.getOperand(0);
8951       EVT VecVT = Src.getValueType();
8952       EVT MemVT = Store->getMemoryVT();
8953       // The memory VT and the element type must match.
8954       if (VecVT.getVectorElementType() == MemVT) {
8955         SDLoc DL(N);
8956         MVT MaskVT = getMaskTypeFor(VecVT);
8957         return DAG.getStoreVP(
8958             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8959             DAG.getConstant(1, DL, MaskVT),
8960             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8961             Store->getMemOperand(), Store->getAddressingMode(),
8962             Store->isTruncatingStore(), /*IsCompress*/ false);
8963       }
8964     }
8965 
8966     break;
8967   }
8968   case ISD::SPLAT_VECTOR: {
8969     EVT VT = N->getValueType(0);
8970     // Only perform this combine on legal MVT types.
8971     if (!isTypeLegal(VT))
8972       break;
8973     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8974                                          DAG, Subtarget))
8975       return Gather;
8976     break;
8977   }
8978   case RISCVISD::VMV_V_X_VL: {
8979     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8980     // scalar input.
8981     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8982     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8983     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8984       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8985         return SDValue(N, 0);
8986 
8987     break;
8988   }
8989   case ISD::INTRINSIC_WO_CHAIN: {
8990     unsigned IntNo = N->getConstantOperandVal(0);
8991     switch (IntNo) {
8992       // By default we do not combine any intrinsic.
8993     default:
8994       return SDValue();
8995     case Intrinsic::riscv_vcpop:
8996     case Intrinsic::riscv_vcpop_mask:
8997     case Intrinsic::riscv_vfirst:
8998     case Intrinsic::riscv_vfirst_mask: {
8999       SDValue VL = N->getOperand(2);
9000       if (IntNo == Intrinsic::riscv_vcpop_mask ||
9001           IntNo == Intrinsic::riscv_vfirst_mask)
9002         VL = N->getOperand(3);
9003       if (!isNullConstant(VL))
9004         return SDValue();
9005       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
9006       SDLoc DL(N);
9007       EVT VT = N->getValueType(0);
9008       if (IntNo == Intrinsic::riscv_vfirst ||
9009           IntNo == Intrinsic::riscv_vfirst_mask)
9010         return DAG.getConstant(-1, DL, VT);
9011       return DAG.getConstant(0, DL, VT);
9012     }
9013     }
9014   }
9015   }
9016 
9017   return SDValue();
9018 }
9019 
9020 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
9021     const SDNode *N, CombineLevel Level) const {
9022   // The following folds are only desirable if `(OP _, c1 << c2)` can be
9023   // materialised in fewer instructions than `(OP _, c1)`:
9024   //
9025   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
9026   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
9027   SDValue N0 = N->getOperand(0);
9028   EVT Ty = N0.getValueType();
9029   if (Ty.isScalarInteger() &&
9030       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
9031     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
9032     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
9033     if (C1 && C2) {
9034       const APInt &C1Int = C1->getAPIntValue();
9035       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
9036 
9037       // We can materialise `c1 << c2` into an add immediate, so it's "free",
9038       // and the combine should happen, to potentially allow further combines
9039       // later.
9040       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
9041           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
9042         return true;
9043 
9044       // We can materialise `c1` in an add immediate, so it's "free", and the
9045       // combine should be prevented.
9046       if (C1Int.getMinSignedBits() <= 64 &&
9047           isLegalAddImmediate(C1Int.getSExtValue()))
9048         return false;
9049 
9050       // Neither constant will fit into an immediate, so find materialisation
9051       // costs.
9052       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9053                                               Subtarget.getFeatureBits(),
9054                                               /*CompressionCost*/true);
9055       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9056           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9057           /*CompressionCost*/true);
9058 
9059       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9060       // combine should be prevented.
9061       if (C1Cost < ShiftedC1Cost)
9062         return false;
9063     }
9064   }
9065   return true;
9066 }
9067 
9068 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9069     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9070     TargetLoweringOpt &TLO) const {
9071   // Delay this optimization as late as possible.
9072   if (!TLO.LegalOps)
9073     return false;
9074 
9075   EVT VT = Op.getValueType();
9076   if (VT.isVector())
9077     return false;
9078 
9079   // Only handle AND for now.
9080   if (Op.getOpcode() != ISD::AND)
9081     return false;
9082 
9083   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9084   if (!C)
9085     return false;
9086 
9087   const APInt &Mask = C->getAPIntValue();
9088 
9089   // Clear all non-demanded bits initially.
9090   APInt ShrunkMask = Mask & DemandedBits;
9091 
9092   // Try to make a smaller immediate by setting undemanded bits.
9093 
9094   APInt ExpandedMask = Mask | ~DemandedBits;
9095 
9096   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9097     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9098   };
9099   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
9100     if (NewMask == Mask)
9101       return true;
9102     SDLoc DL(Op);
9103     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
9104     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
9105     return TLO.CombineTo(Op, NewOp);
9106   };
9107 
9108   // If the shrunk mask fits in sign extended 12 bits, let the target
9109   // independent code apply it.
9110   if (ShrunkMask.isSignedIntN(12))
9111     return false;
9112 
9113   // Preserve (and X, 0xffff) when zext.h is supported.
9114   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9115     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9116     if (IsLegalMask(NewMask))
9117       return UseMask(NewMask);
9118   }
9119 
9120   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9121   if (VT == MVT::i64) {
9122     APInt NewMask = APInt(64, 0xffffffff);
9123     if (IsLegalMask(NewMask))
9124       return UseMask(NewMask);
9125   }
9126 
9127   // For the remaining optimizations, we need to be able to make a negative
9128   // number through a combination of mask and undemanded bits.
9129   if (!ExpandedMask.isNegative())
9130     return false;
9131 
9132   // What is the fewest number of bits we need to represent the negative number.
9133   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9134 
9135   // Try to make a 12 bit negative immediate. If that fails try to make a 32
9136   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
9137   APInt NewMask = ShrunkMask;
9138   if (MinSignedBits <= 12)
9139     NewMask.setBitsFrom(11);
9140   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
9141     NewMask.setBitsFrom(31);
9142   else
9143     return false;
9144 
9145   // Check that our new mask is a subset of the demanded mask.
9146   assert(IsLegalMask(NewMask));
9147   return UseMask(NewMask);
9148 }
9149 
9150 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9151   static const uint64_t GREVMasks[] = {
9152       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9153       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9154 
9155   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9156     unsigned Shift = 1 << Stage;
9157     if (ShAmt & Shift) {
9158       uint64_t Mask = GREVMasks[Stage];
9159       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9160       if (IsGORC)
9161         Res |= x;
9162       x = Res;
9163     }
9164   }
9165 
9166   return x;
9167 }
9168 
9169 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9170                                                         KnownBits &Known,
9171                                                         const APInt &DemandedElts,
9172                                                         const SelectionDAG &DAG,
9173                                                         unsigned Depth) const {
9174   unsigned BitWidth = Known.getBitWidth();
9175   unsigned Opc = Op.getOpcode();
9176   assert((Opc >= ISD::BUILTIN_OP_END ||
9177           Opc == ISD::INTRINSIC_WO_CHAIN ||
9178           Opc == ISD::INTRINSIC_W_CHAIN ||
9179           Opc == ISD::INTRINSIC_VOID) &&
9180          "Should use MaskedValueIsZero if you don't know whether Op"
9181          " is a target node!");
9182 
9183   Known.resetAll();
9184   switch (Opc) {
9185   default: break;
9186   case RISCVISD::SELECT_CC: {
9187     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9188     // If we don't know any bits, early out.
9189     if (Known.isUnknown())
9190       break;
9191     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9192 
9193     // Only known if known in both the LHS and RHS.
9194     Known = KnownBits::commonBits(Known, Known2);
9195     break;
9196   }
9197   case RISCVISD::REMUW: {
9198     KnownBits Known2;
9199     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9200     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9201     // We only care about the lower 32 bits.
9202     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9203     // Restore the original width by sign extending.
9204     Known = Known.sext(BitWidth);
9205     break;
9206   }
9207   case RISCVISD::DIVUW: {
9208     KnownBits Known2;
9209     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9210     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9211     // We only care about the lower 32 bits.
9212     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9213     // Restore the original width by sign extending.
9214     Known = Known.sext(BitWidth);
9215     break;
9216   }
9217   case RISCVISD::CTZW: {
9218     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9219     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9220     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9221     Known.Zero.setBitsFrom(LowBits);
9222     break;
9223   }
9224   case RISCVISD::CLZW: {
9225     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9226     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9227     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9228     Known.Zero.setBitsFrom(LowBits);
9229     break;
9230   }
9231   case RISCVISD::GREV:
9232   case RISCVISD::GORC: {
9233     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9234       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9235       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9236       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
9237       // To compute zeros, we need to invert the value and invert it back after.
9238       Known.Zero =
9239           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9240       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9241     }
9242     break;
9243   }
9244   case RISCVISD::READ_VLENB: {
9245     // If we know the minimum VLen from Zvl extensions, we can use that to
9246     // determine the trailing zeros of VLENB.
9247     // FIXME: Limit to 128 bit vectors until we have more testing.
9248     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
9249     if (MinVLenB > 0)
9250       Known.Zero.setLowBits(Log2_32(MinVLenB));
9251     // We assume VLENB is no more than 65536 / 8 bytes.
9252     Known.Zero.setBitsFrom(14);
9253     break;
9254   }
9255   case ISD::INTRINSIC_W_CHAIN:
9256   case ISD::INTRINSIC_WO_CHAIN: {
9257     unsigned IntNo =
9258         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9259     switch (IntNo) {
9260     default:
9261       // We can't do anything for most intrinsics.
9262       break;
9263     case Intrinsic::riscv_vsetvli:
9264     case Intrinsic::riscv_vsetvlimax:
9265     case Intrinsic::riscv_vsetvli_opt:
9266     case Intrinsic::riscv_vsetvlimax_opt:
9267       // Assume that VL output is positive and would fit in an int32_t.
9268       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9269       if (BitWidth >= 32)
9270         Known.Zero.setBitsFrom(31);
9271       break;
9272     }
9273     break;
9274   }
9275   }
9276 }
9277 
9278 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9279     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9280     unsigned Depth) const {
9281   switch (Op.getOpcode()) {
9282   default:
9283     break;
9284   case RISCVISD::SELECT_CC: {
9285     unsigned Tmp =
9286         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9287     if (Tmp == 1) return 1;  // Early out.
9288     unsigned Tmp2 =
9289         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9290     return std::min(Tmp, Tmp2);
9291   }
9292   case RISCVISD::SLLW:
9293   case RISCVISD::SRAW:
9294   case RISCVISD::SRLW:
9295   case RISCVISD::DIVW:
9296   case RISCVISD::DIVUW:
9297   case RISCVISD::REMUW:
9298   case RISCVISD::ROLW:
9299   case RISCVISD::RORW:
9300   case RISCVISD::GREVW:
9301   case RISCVISD::GORCW:
9302   case RISCVISD::FSLW:
9303   case RISCVISD::FSRW:
9304   case RISCVISD::SHFLW:
9305   case RISCVISD::UNSHFLW:
9306   case RISCVISD::BCOMPRESSW:
9307   case RISCVISD::BDECOMPRESSW:
9308   case RISCVISD::BFPW:
9309   case RISCVISD::FCVT_W_RV64:
9310   case RISCVISD::FCVT_WU_RV64:
9311   case RISCVISD::STRICT_FCVT_W_RV64:
9312   case RISCVISD::STRICT_FCVT_WU_RV64:
9313     // TODO: As the result is sign-extended, this is conservatively correct. A
9314     // more precise answer could be calculated for SRAW depending on known
9315     // bits in the shift amount.
9316     return 33;
9317   case RISCVISD::SHFL:
9318   case RISCVISD::UNSHFL: {
9319     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
9320     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9321     // will stay within the upper 32 bits. If there were more than 32 sign bits
9322     // before there will be at least 33 sign bits after.
9323     if (Op.getValueType() == MVT::i64 &&
9324         isa<ConstantSDNode>(Op.getOperand(1)) &&
9325         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9326       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9327       if (Tmp > 32)
9328         return 33;
9329     }
9330     break;
9331   }
9332   case RISCVISD::VMV_X_S: {
9333     // The number of sign bits of the scalar result is computed by obtaining the
9334     // element type of the input vector operand, subtracting its width from the
9335     // XLEN, and then adding one (sign bit within the element type). If the
9336     // element type is wider than XLen, the least-significant XLEN bits are
9337     // taken.
9338     unsigned XLen = Subtarget.getXLen();
9339     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9340     if (EltBits <= XLen)
9341       return XLen - EltBits + 1;
9342     break;
9343   }
9344   }
9345 
9346   return 1;
9347 }
9348 
9349 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9350                                                   MachineBasicBlock *BB) {
9351   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9352 
9353   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9354   // Should the count have wrapped while it was being read, we need to try
9355   // again.
9356   // ...
9357   // read:
9358   // rdcycleh x3 # load high word of cycle
9359   // rdcycle  x2 # load low word of cycle
9360   // rdcycleh x4 # load high word of cycle
9361   // bne x3, x4, read # check if high word reads match, otherwise try again
9362   // ...
9363 
9364   MachineFunction &MF = *BB->getParent();
9365   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9366   MachineFunction::iterator It = ++BB->getIterator();
9367 
9368   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9369   MF.insert(It, LoopMBB);
9370 
9371   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9372   MF.insert(It, DoneMBB);
9373 
9374   // Transfer the remainder of BB and its successor edges to DoneMBB.
9375   DoneMBB->splice(DoneMBB->begin(), BB,
9376                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9377   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9378 
9379   BB->addSuccessor(LoopMBB);
9380 
9381   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9382   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9383   Register LoReg = MI.getOperand(0).getReg();
9384   Register HiReg = MI.getOperand(1).getReg();
9385   DebugLoc DL = MI.getDebugLoc();
9386 
9387   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9388   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9389       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9390       .addReg(RISCV::X0);
9391   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9392       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9393       .addReg(RISCV::X0);
9394   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9395       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9396       .addReg(RISCV::X0);
9397 
9398   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9399       .addReg(HiReg)
9400       .addReg(ReadAgainReg)
9401       .addMBB(LoopMBB);
9402 
9403   LoopMBB->addSuccessor(LoopMBB);
9404   LoopMBB->addSuccessor(DoneMBB);
9405 
9406   MI.eraseFromParent();
9407 
9408   return DoneMBB;
9409 }
9410 
9411 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9412                                              MachineBasicBlock *BB) {
9413   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9414 
9415   MachineFunction &MF = *BB->getParent();
9416   DebugLoc DL = MI.getDebugLoc();
9417   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9418   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9419   Register LoReg = MI.getOperand(0).getReg();
9420   Register HiReg = MI.getOperand(1).getReg();
9421   Register SrcReg = MI.getOperand(2).getReg();
9422   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9423   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9424 
9425   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9426                           RI);
9427   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9428   MachineMemOperand *MMOLo =
9429       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9430   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9431       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9432   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9433       .addFrameIndex(FI)
9434       .addImm(0)
9435       .addMemOperand(MMOLo);
9436   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9437       .addFrameIndex(FI)
9438       .addImm(4)
9439       .addMemOperand(MMOHi);
9440   MI.eraseFromParent(); // The pseudo instruction is gone now.
9441   return BB;
9442 }
9443 
9444 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9445                                                  MachineBasicBlock *BB) {
9446   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9447          "Unexpected instruction");
9448 
9449   MachineFunction &MF = *BB->getParent();
9450   DebugLoc DL = MI.getDebugLoc();
9451   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9452   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9453   Register DstReg = MI.getOperand(0).getReg();
9454   Register LoReg = MI.getOperand(1).getReg();
9455   Register HiReg = MI.getOperand(2).getReg();
9456   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9457   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9458 
9459   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9460   MachineMemOperand *MMOLo =
9461       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9462   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9463       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9464   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9465       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9466       .addFrameIndex(FI)
9467       .addImm(0)
9468       .addMemOperand(MMOLo);
9469   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9470       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9471       .addFrameIndex(FI)
9472       .addImm(4)
9473       .addMemOperand(MMOHi);
9474   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9475   MI.eraseFromParent(); // The pseudo instruction is gone now.
9476   return BB;
9477 }
9478 
9479 static bool isSelectPseudo(MachineInstr &MI) {
9480   switch (MI.getOpcode()) {
9481   default:
9482     return false;
9483   case RISCV::Select_GPR_Using_CC_GPR:
9484   case RISCV::Select_FPR16_Using_CC_GPR:
9485   case RISCV::Select_FPR32_Using_CC_GPR:
9486   case RISCV::Select_FPR64_Using_CC_GPR:
9487     return true;
9488   }
9489 }
9490 
9491 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9492                                         unsigned RelOpcode, unsigned EqOpcode,
9493                                         const RISCVSubtarget &Subtarget) {
9494   DebugLoc DL = MI.getDebugLoc();
9495   Register DstReg = MI.getOperand(0).getReg();
9496   Register Src1Reg = MI.getOperand(1).getReg();
9497   Register Src2Reg = MI.getOperand(2).getReg();
9498   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9499   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9500   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9501 
9502   // Save the current FFLAGS.
9503   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9504 
9505   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9506                  .addReg(Src1Reg)
9507                  .addReg(Src2Reg);
9508   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9509     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9510 
9511   // Restore the FFLAGS.
9512   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9513       .addReg(SavedFFlags, RegState::Kill);
9514 
9515   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9516   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9517                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9518                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9519   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9520     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9521 
9522   // Erase the pseudoinstruction.
9523   MI.eraseFromParent();
9524   return BB;
9525 }
9526 
9527 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9528                                            MachineBasicBlock *BB,
9529                                            const RISCVSubtarget &Subtarget) {
9530   // To "insert" Select_* instructions, we actually have to insert the triangle
9531   // control-flow pattern.  The incoming instructions know the destination vreg
9532   // to set, the condition code register to branch on, the true/false values to
9533   // select between, and the condcode to use to select the appropriate branch.
9534   //
9535   // We produce the following control flow:
9536   //     HeadMBB
9537   //     |  \
9538   //     |  IfFalseMBB
9539   //     | /
9540   //    TailMBB
9541   //
9542   // When we find a sequence of selects we attempt to optimize their emission
9543   // by sharing the control flow. Currently we only handle cases where we have
9544   // multiple selects with the exact same condition (same LHS, RHS and CC).
9545   // The selects may be interleaved with other instructions if the other
9546   // instructions meet some requirements we deem safe:
9547   // - They are debug instructions. Otherwise,
9548   // - They do not have side-effects, do not access memory and their inputs do
9549   //   not depend on the results of the select pseudo-instructions.
9550   // The TrueV/FalseV operands of the selects cannot depend on the result of
9551   // previous selects in the sequence.
9552   // These conditions could be further relaxed. See the X86 target for a
9553   // related approach and more information.
9554   Register LHS = MI.getOperand(1).getReg();
9555   Register RHS = MI.getOperand(2).getReg();
9556   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9557 
9558   SmallVector<MachineInstr *, 4> SelectDebugValues;
9559   SmallSet<Register, 4> SelectDests;
9560   SelectDests.insert(MI.getOperand(0).getReg());
9561 
9562   MachineInstr *LastSelectPseudo = &MI;
9563 
9564   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9565        SequenceMBBI != E; ++SequenceMBBI) {
9566     if (SequenceMBBI->isDebugInstr())
9567       continue;
9568     if (isSelectPseudo(*SequenceMBBI)) {
9569       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9570           SequenceMBBI->getOperand(2).getReg() != RHS ||
9571           SequenceMBBI->getOperand(3).getImm() != CC ||
9572           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9573           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9574         break;
9575       LastSelectPseudo = &*SequenceMBBI;
9576       SequenceMBBI->collectDebugValues(SelectDebugValues);
9577       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9578     } else {
9579       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9580           SequenceMBBI->mayLoadOrStore())
9581         break;
9582       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9583             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9584           }))
9585         break;
9586     }
9587   }
9588 
9589   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9590   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9591   DebugLoc DL = MI.getDebugLoc();
9592   MachineFunction::iterator I = ++BB->getIterator();
9593 
9594   MachineBasicBlock *HeadMBB = BB;
9595   MachineFunction *F = BB->getParent();
9596   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9597   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9598 
9599   F->insert(I, IfFalseMBB);
9600   F->insert(I, TailMBB);
9601 
9602   // Transfer debug instructions associated with the selects to TailMBB.
9603   for (MachineInstr *DebugInstr : SelectDebugValues) {
9604     TailMBB->push_back(DebugInstr->removeFromParent());
9605   }
9606 
9607   // Move all instructions after the sequence to TailMBB.
9608   TailMBB->splice(TailMBB->end(), HeadMBB,
9609                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9610   // Update machine-CFG edges by transferring all successors of the current
9611   // block to the new block which will contain the Phi nodes for the selects.
9612   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9613   // Set the successors for HeadMBB.
9614   HeadMBB->addSuccessor(IfFalseMBB);
9615   HeadMBB->addSuccessor(TailMBB);
9616 
9617   // Insert appropriate branch.
9618   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9619     .addReg(LHS)
9620     .addReg(RHS)
9621     .addMBB(TailMBB);
9622 
9623   // IfFalseMBB just falls through to TailMBB.
9624   IfFalseMBB->addSuccessor(TailMBB);
9625 
9626   // Create PHIs for all of the select pseudo-instructions.
9627   auto SelectMBBI = MI.getIterator();
9628   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9629   auto InsertionPoint = TailMBB->begin();
9630   while (SelectMBBI != SelectEnd) {
9631     auto Next = std::next(SelectMBBI);
9632     if (isSelectPseudo(*SelectMBBI)) {
9633       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9634       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9635               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9636           .addReg(SelectMBBI->getOperand(4).getReg())
9637           .addMBB(HeadMBB)
9638           .addReg(SelectMBBI->getOperand(5).getReg())
9639           .addMBB(IfFalseMBB);
9640       SelectMBBI->eraseFromParent();
9641     }
9642     SelectMBBI = Next;
9643   }
9644 
9645   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9646   return TailMBB;
9647 }
9648 
9649 MachineBasicBlock *
9650 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9651                                                  MachineBasicBlock *BB) const {
9652   switch (MI.getOpcode()) {
9653   default:
9654     llvm_unreachable("Unexpected instr type to insert");
9655   case RISCV::ReadCycleWide:
9656     assert(!Subtarget.is64Bit() &&
9657            "ReadCycleWrite is only to be used on riscv32");
9658     return emitReadCycleWidePseudo(MI, BB);
9659   case RISCV::Select_GPR_Using_CC_GPR:
9660   case RISCV::Select_FPR16_Using_CC_GPR:
9661   case RISCV::Select_FPR32_Using_CC_GPR:
9662   case RISCV::Select_FPR64_Using_CC_GPR:
9663     return emitSelectPseudo(MI, BB, Subtarget);
9664   case RISCV::BuildPairF64Pseudo:
9665     return emitBuildPairF64Pseudo(MI, BB);
9666   case RISCV::SplitF64Pseudo:
9667     return emitSplitF64Pseudo(MI, BB);
9668   case RISCV::PseudoQuietFLE_H:
9669     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9670   case RISCV::PseudoQuietFLT_H:
9671     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9672   case RISCV::PseudoQuietFLE_S:
9673     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9674   case RISCV::PseudoQuietFLT_S:
9675     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9676   case RISCV::PseudoQuietFLE_D:
9677     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9678   case RISCV::PseudoQuietFLT_D:
9679     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9680   }
9681 }
9682 
9683 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9684                                                         SDNode *Node) const {
9685   // Add FRM dependency to any instructions with dynamic rounding mode.
9686   unsigned Opc = MI.getOpcode();
9687   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9688   if (Idx < 0)
9689     return;
9690   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9691     return;
9692   // If the instruction already reads FRM, don't add another read.
9693   if (MI.readsRegister(RISCV::FRM))
9694     return;
9695   MI.addOperand(
9696       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9697 }
9698 
9699 // Calling Convention Implementation.
9700 // The expectations for frontend ABI lowering vary from target to target.
9701 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9702 // details, but this is a longer term goal. For now, we simply try to keep the
9703 // role of the frontend as simple and well-defined as possible. The rules can
9704 // be summarised as:
9705 // * Never split up large scalar arguments. We handle them here.
9706 // * If a hardfloat calling convention is being used, and the struct may be
9707 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9708 // available, then pass as two separate arguments. If either the GPRs or FPRs
9709 // are exhausted, then pass according to the rule below.
9710 // * If a struct could never be passed in registers or directly in a stack
9711 // slot (as it is larger than 2*XLEN and the floating point rules don't
9712 // apply), then pass it using a pointer with the byval attribute.
9713 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9714 // word-sized array or a 2*XLEN scalar (depending on alignment).
9715 // * The frontend can determine whether a struct is returned by reference or
9716 // not based on its size and fields. If it will be returned by reference, the
9717 // frontend must modify the prototype so a pointer with the sret annotation is
9718 // passed as the first argument. This is not necessary for large scalar
9719 // returns.
9720 // * Struct return values and varargs should be coerced to structs containing
9721 // register-size fields in the same situations they would be for fixed
9722 // arguments.
9723 
9724 static const MCPhysReg ArgGPRs[] = {
9725   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9726   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9727 };
9728 static const MCPhysReg ArgFPR16s[] = {
9729   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9730   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9731 };
9732 static const MCPhysReg ArgFPR32s[] = {
9733   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9734   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9735 };
9736 static const MCPhysReg ArgFPR64s[] = {
9737   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9738   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9739 };
9740 // This is an interim calling convention and it may be changed in the future.
9741 static const MCPhysReg ArgVRs[] = {
9742     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9743     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9744     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9745 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9746                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9747                                      RISCV::V20M2, RISCV::V22M2};
9748 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9749                                      RISCV::V20M4};
9750 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9751 
9752 // Pass a 2*XLEN argument that has been split into two XLEN values through
9753 // registers or the stack as necessary.
9754 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9755                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9756                                 MVT ValVT2, MVT LocVT2,
9757                                 ISD::ArgFlagsTy ArgFlags2) {
9758   unsigned XLenInBytes = XLen / 8;
9759   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9760     // At least one half can be passed via register.
9761     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9762                                      VA1.getLocVT(), CCValAssign::Full));
9763   } else {
9764     // Both halves must be passed on the stack, with proper alignment.
9765     Align StackAlign =
9766         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9767     State.addLoc(
9768         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9769                             State.AllocateStack(XLenInBytes, StackAlign),
9770                             VA1.getLocVT(), CCValAssign::Full));
9771     State.addLoc(CCValAssign::getMem(
9772         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9773         LocVT2, CCValAssign::Full));
9774     return false;
9775   }
9776 
9777   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9778     // The second half can also be passed via register.
9779     State.addLoc(
9780         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9781   } else {
9782     // The second half is passed via the stack, without additional alignment.
9783     State.addLoc(CCValAssign::getMem(
9784         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9785         LocVT2, CCValAssign::Full));
9786   }
9787 
9788   return false;
9789 }
9790 
9791 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9792                                Optional<unsigned> FirstMaskArgument,
9793                                CCState &State, const RISCVTargetLowering &TLI) {
9794   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9795   if (RC == &RISCV::VRRegClass) {
9796     // Assign the first mask argument to V0.
9797     // This is an interim calling convention and it may be changed in the
9798     // future.
9799     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9800       return State.AllocateReg(RISCV::V0);
9801     return State.AllocateReg(ArgVRs);
9802   }
9803   if (RC == &RISCV::VRM2RegClass)
9804     return State.AllocateReg(ArgVRM2s);
9805   if (RC == &RISCV::VRM4RegClass)
9806     return State.AllocateReg(ArgVRM4s);
9807   if (RC == &RISCV::VRM8RegClass)
9808     return State.AllocateReg(ArgVRM8s);
9809   llvm_unreachable("Unhandled register class for ValueType");
9810 }
9811 
9812 // Implements the RISC-V calling convention. Returns true upon failure.
9813 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9814                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9815                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9816                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9817                      Optional<unsigned> FirstMaskArgument) {
9818   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9819   assert(XLen == 32 || XLen == 64);
9820   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9821 
9822   // Any return value split in to more than two values can't be returned
9823   // directly. Vectors are returned via the available vector registers.
9824   if (!LocVT.isVector() && IsRet && ValNo > 1)
9825     return true;
9826 
9827   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9828   // variadic argument, or if no F16/F32 argument registers are available.
9829   bool UseGPRForF16_F32 = true;
9830   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9831   // variadic argument, or if no F64 argument registers are available.
9832   bool UseGPRForF64 = true;
9833 
9834   switch (ABI) {
9835   default:
9836     llvm_unreachable("Unexpected ABI");
9837   case RISCVABI::ABI_ILP32:
9838   case RISCVABI::ABI_LP64:
9839     break;
9840   case RISCVABI::ABI_ILP32F:
9841   case RISCVABI::ABI_LP64F:
9842     UseGPRForF16_F32 = !IsFixed;
9843     break;
9844   case RISCVABI::ABI_ILP32D:
9845   case RISCVABI::ABI_LP64D:
9846     UseGPRForF16_F32 = !IsFixed;
9847     UseGPRForF64 = !IsFixed;
9848     break;
9849   }
9850 
9851   // FPR16, FPR32, and FPR64 alias each other.
9852   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9853     UseGPRForF16_F32 = true;
9854     UseGPRForF64 = true;
9855   }
9856 
9857   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9858   // similar local variables rather than directly checking against the target
9859   // ABI.
9860 
9861   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9862     LocVT = XLenVT;
9863     LocInfo = CCValAssign::BCvt;
9864   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9865     LocVT = MVT::i64;
9866     LocInfo = CCValAssign::BCvt;
9867   }
9868 
9869   // If this is a variadic argument, the RISC-V calling convention requires
9870   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9871   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9872   // be used regardless of whether the original argument was split during
9873   // legalisation or not. The argument will not be passed by registers if the
9874   // original type is larger than 2*XLEN, so the register alignment rule does
9875   // not apply.
9876   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9877   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9878       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9879     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9880     // Skip 'odd' register if necessary.
9881     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9882       State.AllocateReg(ArgGPRs);
9883   }
9884 
9885   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9886   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9887       State.getPendingArgFlags();
9888 
9889   assert(PendingLocs.size() == PendingArgFlags.size() &&
9890          "PendingLocs and PendingArgFlags out of sync");
9891 
9892   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9893   // registers are exhausted.
9894   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9895     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9896            "Can't lower f64 if it is split");
9897     // Depending on available argument GPRS, f64 may be passed in a pair of
9898     // GPRs, split between a GPR and the stack, or passed completely on the
9899     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9900     // cases.
9901     Register Reg = State.AllocateReg(ArgGPRs);
9902     LocVT = MVT::i32;
9903     if (!Reg) {
9904       unsigned StackOffset = State.AllocateStack(8, Align(8));
9905       State.addLoc(
9906           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9907       return false;
9908     }
9909     if (!State.AllocateReg(ArgGPRs))
9910       State.AllocateStack(4, Align(4));
9911     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9912     return false;
9913   }
9914 
9915   // Fixed-length vectors are located in the corresponding scalable-vector
9916   // container types.
9917   if (ValVT.isFixedLengthVector())
9918     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9919 
9920   // Split arguments might be passed indirectly, so keep track of the pending
9921   // values. Split vectors are passed via a mix of registers and indirectly, so
9922   // treat them as we would any other argument.
9923   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9924     LocVT = XLenVT;
9925     LocInfo = CCValAssign::Indirect;
9926     PendingLocs.push_back(
9927         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9928     PendingArgFlags.push_back(ArgFlags);
9929     if (!ArgFlags.isSplitEnd()) {
9930       return false;
9931     }
9932   }
9933 
9934   // If the split argument only had two elements, it should be passed directly
9935   // in registers or on the stack.
9936   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9937       PendingLocs.size() <= 2) {
9938     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9939     // Apply the normal calling convention rules to the first half of the
9940     // split argument.
9941     CCValAssign VA = PendingLocs[0];
9942     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9943     PendingLocs.clear();
9944     PendingArgFlags.clear();
9945     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9946                                ArgFlags);
9947   }
9948 
9949   // Allocate to a register if possible, or else a stack slot.
9950   Register Reg;
9951   unsigned StoreSizeBytes = XLen / 8;
9952   Align StackAlign = Align(XLen / 8);
9953 
9954   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9955     Reg = State.AllocateReg(ArgFPR16s);
9956   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9957     Reg = State.AllocateReg(ArgFPR32s);
9958   else if (ValVT == MVT::f64 && !UseGPRForF64)
9959     Reg = State.AllocateReg(ArgFPR64s);
9960   else if (ValVT.isVector()) {
9961     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9962     if (!Reg) {
9963       // For return values, the vector must be passed fully via registers or
9964       // via the stack.
9965       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9966       // but we're using all of them.
9967       if (IsRet)
9968         return true;
9969       // Try using a GPR to pass the address
9970       if ((Reg = State.AllocateReg(ArgGPRs))) {
9971         LocVT = XLenVT;
9972         LocInfo = CCValAssign::Indirect;
9973       } else if (ValVT.isScalableVector()) {
9974         LocVT = XLenVT;
9975         LocInfo = CCValAssign::Indirect;
9976       } else {
9977         // Pass fixed-length vectors on the stack.
9978         LocVT = ValVT;
9979         StoreSizeBytes = ValVT.getStoreSize();
9980         // Align vectors to their element sizes, being careful for vXi1
9981         // vectors.
9982         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9983       }
9984     }
9985   } else {
9986     Reg = State.AllocateReg(ArgGPRs);
9987   }
9988 
9989   unsigned StackOffset =
9990       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9991 
9992   // If we reach this point and PendingLocs is non-empty, we must be at the
9993   // end of a split argument that must be passed indirectly.
9994   if (!PendingLocs.empty()) {
9995     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9996     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9997 
9998     for (auto &It : PendingLocs) {
9999       if (Reg)
10000         It.convertToReg(Reg);
10001       else
10002         It.convertToMem(StackOffset);
10003       State.addLoc(It);
10004     }
10005     PendingLocs.clear();
10006     PendingArgFlags.clear();
10007     return false;
10008   }
10009 
10010   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
10011           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
10012          "Expected an XLenVT or vector types at this stage");
10013 
10014   if (Reg) {
10015     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10016     return false;
10017   }
10018 
10019   // When a floating-point value is passed on the stack, no bit-conversion is
10020   // needed.
10021   if (ValVT.isFloatingPoint()) {
10022     LocVT = ValVT;
10023     LocInfo = CCValAssign::Full;
10024   }
10025   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10026   return false;
10027 }
10028 
10029 template <typename ArgTy>
10030 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
10031   for (const auto &ArgIdx : enumerate(Args)) {
10032     MVT ArgVT = ArgIdx.value().VT;
10033     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
10034       return ArgIdx.index();
10035   }
10036   return None;
10037 }
10038 
10039 void RISCVTargetLowering::analyzeInputArgs(
10040     MachineFunction &MF, CCState &CCInfo,
10041     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
10042     RISCVCCAssignFn Fn) const {
10043   unsigned NumArgs = Ins.size();
10044   FunctionType *FType = MF.getFunction().getFunctionType();
10045 
10046   Optional<unsigned> FirstMaskArgument;
10047   if (Subtarget.hasVInstructions())
10048     FirstMaskArgument = preAssignMask(Ins);
10049 
10050   for (unsigned i = 0; i != NumArgs; ++i) {
10051     MVT ArgVT = Ins[i].VT;
10052     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10053 
10054     Type *ArgTy = nullptr;
10055     if (IsRet)
10056       ArgTy = FType->getReturnType();
10057     else if (Ins[i].isOrigArg())
10058       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10059 
10060     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10061     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10062            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10063            FirstMaskArgument)) {
10064       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10065                         << EVT(ArgVT).getEVTString() << '\n');
10066       llvm_unreachable(nullptr);
10067     }
10068   }
10069 }
10070 
10071 void RISCVTargetLowering::analyzeOutputArgs(
10072     MachineFunction &MF, CCState &CCInfo,
10073     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10074     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10075   unsigned NumArgs = Outs.size();
10076 
10077   Optional<unsigned> FirstMaskArgument;
10078   if (Subtarget.hasVInstructions())
10079     FirstMaskArgument = preAssignMask(Outs);
10080 
10081   for (unsigned i = 0; i != NumArgs; i++) {
10082     MVT ArgVT = Outs[i].VT;
10083     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10084     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10085 
10086     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10087     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10088            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10089            FirstMaskArgument)) {
10090       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10091                         << EVT(ArgVT).getEVTString() << "\n");
10092       llvm_unreachable(nullptr);
10093     }
10094   }
10095 }
10096 
10097 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
10098 // values.
10099 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10100                                    const CCValAssign &VA, const SDLoc &DL,
10101                                    const RISCVSubtarget &Subtarget) {
10102   switch (VA.getLocInfo()) {
10103   default:
10104     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10105   case CCValAssign::Full:
10106     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10107       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10108     break;
10109   case CCValAssign::BCvt:
10110     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10111       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10112     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10113       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10114     else
10115       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10116     break;
10117   }
10118   return Val;
10119 }
10120 
10121 // The caller is responsible for loading the full value if the argument is
10122 // passed with CCValAssign::Indirect.
10123 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10124                                 const CCValAssign &VA, const SDLoc &DL,
10125                                 const RISCVTargetLowering &TLI) {
10126   MachineFunction &MF = DAG.getMachineFunction();
10127   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10128   EVT LocVT = VA.getLocVT();
10129   SDValue Val;
10130   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10131   Register VReg = RegInfo.createVirtualRegister(RC);
10132   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10133   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10134 
10135   if (VA.getLocInfo() == CCValAssign::Indirect)
10136     return Val;
10137 
10138   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10139 }
10140 
10141 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10142                                    const CCValAssign &VA, const SDLoc &DL,
10143                                    const RISCVSubtarget &Subtarget) {
10144   EVT LocVT = VA.getLocVT();
10145 
10146   switch (VA.getLocInfo()) {
10147   default:
10148     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10149   case CCValAssign::Full:
10150     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10151       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10152     break;
10153   case CCValAssign::BCvt:
10154     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10155       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10156     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10157       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10158     else
10159       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10160     break;
10161   }
10162   return Val;
10163 }
10164 
10165 // The caller is responsible for loading the full value if the argument is
10166 // passed with CCValAssign::Indirect.
10167 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10168                                 const CCValAssign &VA, const SDLoc &DL) {
10169   MachineFunction &MF = DAG.getMachineFunction();
10170   MachineFrameInfo &MFI = MF.getFrameInfo();
10171   EVT LocVT = VA.getLocVT();
10172   EVT ValVT = VA.getValVT();
10173   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10174   if (ValVT.isScalableVector()) {
10175     // When the value is a scalable vector, we save the pointer which points to
10176     // the scalable vector value in the stack. The ValVT will be the pointer
10177     // type, instead of the scalable vector type.
10178     ValVT = LocVT;
10179   }
10180   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10181                                  /*IsImmutable=*/true);
10182   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10183   SDValue Val;
10184 
10185   ISD::LoadExtType ExtType;
10186   switch (VA.getLocInfo()) {
10187   default:
10188     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10189   case CCValAssign::Full:
10190   case CCValAssign::Indirect:
10191   case CCValAssign::BCvt:
10192     ExtType = ISD::NON_EXTLOAD;
10193     break;
10194   }
10195   Val = DAG.getExtLoad(
10196       ExtType, DL, LocVT, Chain, FIN,
10197       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10198   return Val;
10199 }
10200 
10201 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10202                                        const CCValAssign &VA, const SDLoc &DL) {
10203   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10204          "Unexpected VA");
10205   MachineFunction &MF = DAG.getMachineFunction();
10206   MachineFrameInfo &MFI = MF.getFrameInfo();
10207   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10208 
10209   if (VA.isMemLoc()) {
10210     // f64 is passed on the stack.
10211     int FI =
10212         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10213     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10214     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10215                        MachinePointerInfo::getFixedStack(MF, FI));
10216   }
10217 
10218   assert(VA.isRegLoc() && "Expected register VA assignment");
10219 
10220   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10221   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10222   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10223   SDValue Hi;
10224   if (VA.getLocReg() == RISCV::X17) {
10225     // Second half of f64 is passed on the stack.
10226     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10227     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10228     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10229                      MachinePointerInfo::getFixedStack(MF, FI));
10230   } else {
10231     // Second half of f64 is passed in another GPR.
10232     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10233     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10234     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10235   }
10236   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10237 }
10238 
10239 // FastCC has less than 1% performance improvement for some particular
10240 // benchmark. But theoretically, it may has benenfit for some cases.
10241 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10242                             unsigned ValNo, MVT ValVT, MVT LocVT,
10243                             CCValAssign::LocInfo LocInfo,
10244                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10245                             bool IsFixed, bool IsRet, Type *OrigTy,
10246                             const RISCVTargetLowering &TLI,
10247                             Optional<unsigned> FirstMaskArgument) {
10248 
10249   // X5 and X6 might be used for save-restore libcall.
10250   static const MCPhysReg GPRList[] = {
10251       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10252       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10253       RISCV::X29, RISCV::X30, RISCV::X31};
10254 
10255   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10256     if (unsigned Reg = State.AllocateReg(GPRList)) {
10257       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10258       return false;
10259     }
10260   }
10261 
10262   if (LocVT == MVT::f16) {
10263     static const MCPhysReg FPR16List[] = {
10264         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10265         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10266         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10267         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10268     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10269       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10270       return false;
10271     }
10272   }
10273 
10274   if (LocVT == MVT::f32) {
10275     static const MCPhysReg FPR32List[] = {
10276         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10277         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10278         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10279         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10280     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10281       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10282       return false;
10283     }
10284   }
10285 
10286   if (LocVT == MVT::f64) {
10287     static const MCPhysReg FPR64List[] = {
10288         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10289         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10290         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10291         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10292     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10293       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10294       return false;
10295     }
10296   }
10297 
10298   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10299     unsigned Offset4 = State.AllocateStack(4, Align(4));
10300     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10301     return false;
10302   }
10303 
10304   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10305     unsigned Offset5 = State.AllocateStack(8, Align(8));
10306     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10307     return false;
10308   }
10309 
10310   if (LocVT.isVector()) {
10311     if (unsigned Reg =
10312             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10313       // Fixed-length vectors are located in the corresponding scalable-vector
10314       // container types.
10315       if (ValVT.isFixedLengthVector())
10316         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10317       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10318     } else {
10319       // Try and pass the address via a "fast" GPR.
10320       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10321         LocInfo = CCValAssign::Indirect;
10322         LocVT = TLI.getSubtarget().getXLenVT();
10323         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10324       } else if (ValVT.isFixedLengthVector()) {
10325         auto StackAlign =
10326             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10327         unsigned StackOffset =
10328             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10329         State.addLoc(
10330             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10331       } else {
10332         // Can't pass scalable vectors on the stack.
10333         return true;
10334       }
10335     }
10336 
10337     return false;
10338   }
10339 
10340   return true; // CC didn't match.
10341 }
10342 
10343 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10344                          CCValAssign::LocInfo LocInfo,
10345                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10346 
10347   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10348     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10349     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10350     static const MCPhysReg GPRList[] = {
10351         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10352         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10353     if (unsigned Reg = State.AllocateReg(GPRList)) {
10354       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10355       return false;
10356     }
10357   }
10358 
10359   if (LocVT == MVT::f32) {
10360     // Pass in STG registers: F1, ..., F6
10361     //                        fs0 ... fs5
10362     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10363                                           RISCV::F18_F, RISCV::F19_F,
10364                                           RISCV::F20_F, RISCV::F21_F};
10365     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10366       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10367       return false;
10368     }
10369   }
10370 
10371   if (LocVT == MVT::f64) {
10372     // Pass in STG registers: D1, ..., D6
10373     //                        fs6 ... fs11
10374     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10375                                           RISCV::F24_D, RISCV::F25_D,
10376                                           RISCV::F26_D, RISCV::F27_D};
10377     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10378       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10379       return false;
10380     }
10381   }
10382 
10383   report_fatal_error("No registers left in GHC calling convention");
10384   return true;
10385 }
10386 
10387 // Transform physical registers into virtual registers.
10388 SDValue RISCVTargetLowering::LowerFormalArguments(
10389     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10390     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10391     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10392 
10393   MachineFunction &MF = DAG.getMachineFunction();
10394 
10395   switch (CallConv) {
10396   default:
10397     report_fatal_error("Unsupported calling convention");
10398   case CallingConv::C:
10399   case CallingConv::Fast:
10400     break;
10401   case CallingConv::GHC:
10402     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10403         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10404       report_fatal_error(
10405         "GHC calling convention requires the F and D instruction set extensions");
10406   }
10407 
10408   const Function &Func = MF.getFunction();
10409   if (Func.hasFnAttribute("interrupt")) {
10410     if (!Func.arg_empty())
10411       report_fatal_error(
10412         "Functions with the interrupt attribute cannot have arguments!");
10413 
10414     StringRef Kind =
10415       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10416 
10417     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10418       report_fatal_error(
10419         "Function interrupt attribute argument not supported!");
10420   }
10421 
10422   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10423   MVT XLenVT = Subtarget.getXLenVT();
10424   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10425   // Used with vargs to acumulate store chains.
10426   std::vector<SDValue> OutChains;
10427 
10428   // Assign locations to all of the incoming arguments.
10429   SmallVector<CCValAssign, 16> ArgLocs;
10430   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10431 
10432   if (CallConv == CallingConv::GHC)
10433     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10434   else
10435     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10436                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10437                                                    : CC_RISCV);
10438 
10439   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10440     CCValAssign &VA = ArgLocs[i];
10441     SDValue ArgValue;
10442     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10443     // case.
10444     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10445       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10446     else if (VA.isRegLoc())
10447       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10448     else
10449       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10450 
10451     if (VA.getLocInfo() == CCValAssign::Indirect) {
10452       // If the original argument was split and passed by reference (e.g. i128
10453       // on RV32), we need to load all parts of it here (using the same
10454       // address). Vectors may be partly split to registers and partly to the
10455       // stack, in which case the base address is partly offset and subsequent
10456       // stores are relative to that.
10457       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10458                                    MachinePointerInfo()));
10459       unsigned ArgIndex = Ins[i].OrigArgIndex;
10460       unsigned ArgPartOffset = Ins[i].PartOffset;
10461       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10462       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10463         CCValAssign &PartVA = ArgLocs[i + 1];
10464         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10465         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10466         if (PartVA.getValVT().isScalableVector())
10467           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10468         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10469         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10470                                      MachinePointerInfo()));
10471         ++i;
10472       }
10473       continue;
10474     }
10475     InVals.push_back(ArgValue);
10476   }
10477 
10478   if (IsVarArg) {
10479     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10480     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10481     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10482     MachineFrameInfo &MFI = MF.getFrameInfo();
10483     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10484     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10485 
10486     // Offset of the first variable argument from stack pointer, and size of
10487     // the vararg save area. For now, the varargs save area is either zero or
10488     // large enough to hold a0-a7.
10489     int VaArgOffset, VarArgsSaveSize;
10490 
10491     // If all registers are allocated, then all varargs must be passed on the
10492     // stack and we don't need to save any argregs.
10493     if (ArgRegs.size() == Idx) {
10494       VaArgOffset = CCInfo.getNextStackOffset();
10495       VarArgsSaveSize = 0;
10496     } else {
10497       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10498       VaArgOffset = -VarArgsSaveSize;
10499     }
10500 
10501     // Record the frame index of the first variable argument
10502     // which is a value necessary to VASTART.
10503     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10504     RVFI->setVarArgsFrameIndex(FI);
10505 
10506     // If saving an odd number of registers then create an extra stack slot to
10507     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10508     // offsets to even-numbered registered remain 2*XLEN-aligned.
10509     if (Idx % 2) {
10510       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10511       VarArgsSaveSize += XLenInBytes;
10512     }
10513 
10514     // Copy the integer registers that may have been used for passing varargs
10515     // to the vararg save area.
10516     for (unsigned I = Idx; I < ArgRegs.size();
10517          ++I, VaArgOffset += XLenInBytes) {
10518       const Register Reg = RegInfo.createVirtualRegister(RC);
10519       RegInfo.addLiveIn(ArgRegs[I], Reg);
10520       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10521       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10522       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10523       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10524                                    MachinePointerInfo::getFixedStack(MF, FI));
10525       cast<StoreSDNode>(Store.getNode())
10526           ->getMemOperand()
10527           ->setValue((Value *)nullptr);
10528       OutChains.push_back(Store);
10529     }
10530     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10531   }
10532 
10533   // All stores are grouped in one node to allow the matching between
10534   // the size of Ins and InVals. This only happens for vararg functions.
10535   if (!OutChains.empty()) {
10536     OutChains.push_back(Chain);
10537     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10538   }
10539 
10540   return Chain;
10541 }
10542 
10543 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10544 /// for tail call optimization.
10545 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10546 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10547     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10548     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10549 
10550   auto &Callee = CLI.Callee;
10551   auto CalleeCC = CLI.CallConv;
10552   auto &Outs = CLI.Outs;
10553   auto &Caller = MF.getFunction();
10554   auto CallerCC = Caller.getCallingConv();
10555 
10556   // Exception-handling functions need a special set of instructions to
10557   // indicate a return to the hardware. Tail-calling another function would
10558   // probably break this.
10559   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10560   // should be expanded as new function attributes are introduced.
10561   if (Caller.hasFnAttribute("interrupt"))
10562     return false;
10563 
10564   // Do not tail call opt if the stack is used to pass parameters.
10565   if (CCInfo.getNextStackOffset() != 0)
10566     return false;
10567 
10568   // Do not tail call opt if any parameters need to be passed indirectly.
10569   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10570   // passed indirectly. So the address of the value will be passed in a
10571   // register, or if not available, then the address is put on the stack. In
10572   // order to pass indirectly, space on the stack often needs to be allocated
10573   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10574   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10575   // are passed CCValAssign::Indirect.
10576   for (auto &VA : ArgLocs)
10577     if (VA.getLocInfo() == CCValAssign::Indirect)
10578       return false;
10579 
10580   // Do not tail call opt if either caller or callee uses struct return
10581   // semantics.
10582   auto IsCallerStructRet = Caller.hasStructRetAttr();
10583   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10584   if (IsCallerStructRet || IsCalleeStructRet)
10585     return false;
10586 
10587   // Externally-defined functions with weak linkage should not be
10588   // tail-called. The behaviour of branch instructions in this situation (as
10589   // used for tail calls) is implementation-defined, so we cannot rely on the
10590   // linker replacing the tail call with a return.
10591   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10592     const GlobalValue *GV = G->getGlobal();
10593     if (GV->hasExternalWeakLinkage())
10594       return false;
10595   }
10596 
10597   // The callee has to preserve all registers the caller needs to preserve.
10598   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10599   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10600   if (CalleeCC != CallerCC) {
10601     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10602     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10603       return false;
10604   }
10605 
10606   // Byval parameters hand the function a pointer directly into the stack area
10607   // we want to reuse during a tail call. Working around this *is* possible
10608   // but less efficient and uglier in LowerCall.
10609   for (auto &Arg : Outs)
10610     if (Arg.Flags.isByVal())
10611       return false;
10612 
10613   return true;
10614 }
10615 
10616 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10617   return DAG.getDataLayout().getPrefTypeAlign(
10618       VT.getTypeForEVT(*DAG.getContext()));
10619 }
10620 
10621 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10622 // and output parameter nodes.
10623 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10624                                        SmallVectorImpl<SDValue> &InVals) const {
10625   SelectionDAG &DAG = CLI.DAG;
10626   SDLoc &DL = CLI.DL;
10627   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10628   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10629   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10630   SDValue Chain = CLI.Chain;
10631   SDValue Callee = CLI.Callee;
10632   bool &IsTailCall = CLI.IsTailCall;
10633   CallingConv::ID CallConv = CLI.CallConv;
10634   bool IsVarArg = CLI.IsVarArg;
10635   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10636   MVT XLenVT = Subtarget.getXLenVT();
10637 
10638   MachineFunction &MF = DAG.getMachineFunction();
10639 
10640   // Analyze the operands of the call, assigning locations to each operand.
10641   SmallVector<CCValAssign, 16> ArgLocs;
10642   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10643 
10644   if (CallConv == CallingConv::GHC)
10645     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10646   else
10647     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10648                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10649                                                     : CC_RISCV);
10650 
10651   // Check if it's really possible to do a tail call.
10652   if (IsTailCall)
10653     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10654 
10655   if (IsTailCall)
10656     ++NumTailCalls;
10657   else if (CLI.CB && CLI.CB->isMustTailCall())
10658     report_fatal_error("failed to perform tail call elimination on a call "
10659                        "site marked musttail");
10660 
10661   // Get a count of how many bytes are to be pushed on the stack.
10662   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10663 
10664   // Create local copies for byval args
10665   SmallVector<SDValue, 8> ByValArgs;
10666   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10667     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10668     if (!Flags.isByVal())
10669       continue;
10670 
10671     SDValue Arg = OutVals[i];
10672     unsigned Size = Flags.getByValSize();
10673     Align Alignment = Flags.getNonZeroByValAlign();
10674 
10675     int FI =
10676         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10677     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10678     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10679 
10680     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10681                           /*IsVolatile=*/false,
10682                           /*AlwaysInline=*/false, IsTailCall,
10683                           MachinePointerInfo(), MachinePointerInfo());
10684     ByValArgs.push_back(FIPtr);
10685   }
10686 
10687   if (!IsTailCall)
10688     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10689 
10690   // Copy argument values to their designated locations.
10691   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10692   SmallVector<SDValue, 8> MemOpChains;
10693   SDValue StackPtr;
10694   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10695     CCValAssign &VA = ArgLocs[i];
10696     SDValue ArgValue = OutVals[i];
10697     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10698 
10699     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10700     bool IsF64OnRV32DSoftABI =
10701         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10702     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10703       SDValue SplitF64 = DAG.getNode(
10704           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10705       SDValue Lo = SplitF64.getValue(0);
10706       SDValue Hi = SplitF64.getValue(1);
10707 
10708       Register RegLo = VA.getLocReg();
10709       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10710 
10711       if (RegLo == RISCV::X17) {
10712         // Second half of f64 is passed on the stack.
10713         // Work out the address of the stack slot.
10714         if (!StackPtr.getNode())
10715           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10716         // Emit the store.
10717         MemOpChains.push_back(
10718             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10719       } else {
10720         // Second half of f64 is passed in another GPR.
10721         assert(RegLo < RISCV::X31 && "Invalid register pair");
10722         Register RegHigh = RegLo + 1;
10723         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10724       }
10725       continue;
10726     }
10727 
10728     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10729     // as any other MemLoc.
10730 
10731     // Promote the value if needed.
10732     // For now, only handle fully promoted and indirect arguments.
10733     if (VA.getLocInfo() == CCValAssign::Indirect) {
10734       // Store the argument in a stack slot and pass its address.
10735       Align StackAlign =
10736           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10737                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10738       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10739       // If the original argument was split (e.g. i128), we need
10740       // to store the required parts of it here (and pass just one address).
10741       // Vectors may be partly split to registers and partly to the stack, in
10742       // which case the base address is partly offset and subsequent stores are
10743       // relative to that.
10744       unsigned ArgIndex = Outs[i].OrigArgIndex;
10745       unsigned ArgPartOffset = Outs[i].PartOffset;
10746       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10747       // Calculate the total size to store. We don't have access to what we're
10748       // actually storing other than performing the loop and collecting the
10749       // info.
10750       SmallVector<std::pair<SDValue, SDValue>> Parts;
10751       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10752         SDValue PartValue = OutVals[i + 1];
10753         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10754         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10755         EVT PartVT = PartValue.getValueType();
10756         if (PartVT.isScalableVector())
10757           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10758         StoredSize += PartVT.getStoreSize();
10759         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10760         Parts.push_back(std::make_pair(PartValue, Offset));
10761         ++i;
10762       }
10763       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10764       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10765       MemOpChains.push_back(
10766           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10767                        MachinePointerInfo::getFixedStack(MF, FI)));
10768       for (const auto &Part : Parts) {
10769         SDValue PartValue = Part.first;
10770         SDValue PartOffset = Part.second;
10771         SDValue Address =
10772             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10773         MemOpChains.push_back(
10774             DAG.getStore(Chain, DL, PartValue, Address,
10775                          MachinePointerInfo::getFixedStack(MF, FI)));
10776       }
10777       ArgValue = SpillSlot;
10778     } else {
10779       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10780     }
10781 
10782     // Use local copy if it is a byval arg.
10783     if (Flags.isByVal())
10784       ArgValue = ByValArgs[j++];
10785 
10786     if (VA.isRegLoc()) {
10787       // Queue up the argument copies and emit them at the end.
10788       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10789     } else {
10790       assert(VA.isMemLoc() && "Argument not register or memory");
10791       assert(!IsTailCall && "Tail call not allowed if stack is used "
10792                             "for passing parameters");
10793 
10794       // Work out the address of the stack slot.
10795       if (!StackPtr.getNode())
10796         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10797       SDValue Address =
10798           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10799                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10800 
10801       // Emit the store.
10802       MemOpChains.push_back(
10803           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10804     }
10805   }
10806 
10807   // Join the stores, which are independent of one another.
10808   if (!MemOpChains.empty())
10809     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10810 
10811   SDValue Glue;
10812 
10813   // Build a sequence of copy-to-reg nodes, chained and glued together.
10814   for (auto &Reg : RegsToPass) {
10815     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10816     Glue = Chain.getValue(1);
10817   }
10818 
10819   // Validate that none of the argument registers have been marked as
10820   // reserved, if so report an error. Do the same for the return address if this
10821   // is not a tailcall.
10822   validateCCReservedRegs(RegsToPass, MF);
10823   if (!IsTailCall &&
10824       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10825     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10826         MF.getFunction(),
10827         "Return address register required, but has been reserved."});
10828 
10829   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10830   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10831   // split it and then direct call can be matched by PseudoCALL.
10832   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10833     const GlobalValue *GV = S->getGlobal();
10834 
10835     unsigned OpFlags = RISCVII::MO_CALL;
10836     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10837       OpFlags = RISCVII::MO_PLT;
10838 
10839     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10840   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10841     unsigned OpFlags = RISCVII::MO_CALL;
10842 
10843     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10844                                                  nullptr))
10845       OpFlags = RISCVII::MO_PLT;
10846 
10847     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10848   }
10849 
10850   // The first call operand is the chain and the second is the target address.
10851   SmallVector<SDValue, 8> Ops;
10852   Ops.push_back(Chain);
10853   Ops.push_back(Callee);
10854 
10855   // Add argument registers to the end of the list so that they are
10856   // known live into the call.
10857   for (auto &Reg : RegsToPass)
10858     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10859 
10860   if (!IsTailCall) {
10861     // Add a register mask operand representing the call-preserved registers.
10862     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10863     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10864     assert(Mask && "Missing call preserved mask for calling convention");
10865     Ops.push_back(DAG.getRegisterMask(Mask));
10866   }
10867 
10868   // Glue the call to the argument copies, if any.
10869   if (Glue.getNode())
10870     Ops.push_back(Glue);
10871 
10872   // Emit the call.
10873   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10874 
10875   if (IsTailCall) {
10876     MF.getFrameInfo().setHasTailCall();
10877     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10878   }
10879 
10880   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10881   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10882   Glue = Chain.getValue(1);
10883 
10884   // Mark the end of the call, which is glued to the call itself.
10885   Chain = DAG.getCALLSEQ_END(Chain,
10886                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10887                              DAG.getConstant(0, DL, PtrVT, true),
10888                              Glue, DL);
10889   Glue = Chain.getValue(1);
10890 
10891   // Assign locations to each value returned by this call.
10892   SmallVector<CCValAssign, 16> RVLocs;
10893   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10894   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10895 
10896   // Copy all of the result registers out of their specified physreg.
10897   for (auto &VA : RVLocs) {
10898     // Copy the value out
10899     SDValue RetValue =
10900         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10901     // Glue the RetValue to the end of the call sequence
10902     Chain = RetValue.getValue(1);
10903     Glue = RetValue.getValue(2);
10904 
10905     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10906       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10907       SDValue RetValue2 =
10908           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10909       Chain = RetValue2.getValue(1);
10910       Glue = RetValue2.getValue(2);
10911       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10912                              RetValue2);
10913     }
10914 
10915     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10916 
10917     InVals.push_back(RetValue);
10918   }
10919 
10920   return Chain;
10921 }
10922 
10923 bool RISCVTargetLowering::CanLowerReturn(
10924     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10925     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10926   SmallVector<CCValAssign, 16> RVLocs;
10927   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10928 
10929   Optional<unsigned> FirstMaskArgument;
10930   if (Subtarget.hasVInstructions())
10931     FirstMaskArgument = preAssignMask(Outs);
10932 
10933   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10934     MVT VT = Outs[i].VT;
10935     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10936     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10937     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10938                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10939                  *this, FirstMaskArgument))
10940       return false;
10941   }
10942   return true;
10943 }
10944 
10945 SDValue
10946 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10947                                  bool IsVarArg,
10948                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10949                                  const SmallVectorImpl<SDValue> &OutVals,
10950                                  const SDLoc &DL, SelectionDAG &DAG) const {
10951   const MachineFunction &MF = DAG.getMachineFunction();
10952   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10953 
10954   // Stores the assignment of the return value to a location.
10955   SmallVector<CCValAssign, 16> RVLocs;
10956 
10957   // Info about the registers and stack slot.
10958   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10959                  *DAG.getContext());
10960 
10961   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10962                     nullptr, CC_RISCV);
10963 
10964   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10965     report_fatal_error("GHC functions return void only");
10966 
10967   SDValue Glue;
10968   SmallVector<SDValue, 4> RetOps(1, Chain);
10969 
10970   // Copy the result values into the output registers.
10971   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10972     SDValue Val = OutVals[i];
10973     CCValAssign &VA = RVLocs[i];
10974     assert(VA.isRegLoc() && "Can only return in registers!");
10975 
10976     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10977       // Handle returning f64 on RV32D with a soft float ABI.
10978       assert(VA.isRegLoc() && "Expected return via registers");
10979       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10980                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10981       SDValue Lo = SplitF64.getValue(0);
10982       SDValue Hi = SplitF64.getValue(1);
10983       Register RegLo = VA.getLocReg();
10984       assert(RegLo < RISCV::X31 && "Invalid register pair");
10985       Register RegHi = RegLo + 1;
10986 
10987       if (STI.isRegisterReservedByUser(RegLo) ||
10988           STI.isRegisterReservedByUser(RegHi))
10989         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10990             MF.getFunction(),
10991             "Return value register required, but has been reserved."});
10992 
10993       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10994       Glue = Chain.getValue(1);
10995       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10996       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10997       Glue = Chain.getValue(1);
10998       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10999     } else {
11000       // Handle a 'normal' return.
11001       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
11002       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
11003 
11004       if (STI.isRegisterReservedByUser(VA.getLocReg()))
11005         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11006             MF.getFunction(),
11007             "Return value register required, but has been reserved."});
11008 
11009       // Guarantee that all emitted copies are stuck together.
11010       Glue = Chain.getValue(1);
11011       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
11012     }
11013   }
11014 
11015   RetOps[0] = Chain; // Update chain.
11016 
11017   // Add the glue node if we have it.
11018   if (Glue.getNode()) {
11019     RetOps.push_back(Glue);
11020   }
11021 
11022   unsigned RetOpc = RISCVISD::RET_FLAG;
11023   // Interrupt service routines use different return instructions.
11024   const Function &Func = DAG.getMachineFunction().getFunction();
11025   if (Func.hasFnAttribute("interrupt")) {
11026     if (!Func.getReturnType()->isVoidTy())
11027       report_fatal_error(
11028           "Functions with the interrupt attribute must have void return type!");
11029 
11030     MachineFunction &MF = DAG.getMachineFunction();
11031     StringRef Kind =
11032       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
11033 
11034     if (Kind == "user")
11035       RetOpc = RISCVISD::URET_FLAG;
11036     else if (Kind == "supervisor")
11037       RetOpc = RISCVISD::SRET_FLAG;
11038     else
11039       RetOpc = RISCVISD::MRET_FLAG;
11040   }
11041 
11042   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
11043 }
11044 
11045 void RISCVTargetLowering::validateCCReservedRegs(
11046     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11047     MachineFunction &MF) const {
11048   const Function &F = MF.getFunction();
11049   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11050 
11051   if (llvm::any_of(Regs, [&STI](auto Reg) {
11052         return STI.isRegisterReservedByUser(Reg.first);
11053       }))
11054     F.getContext().diagnose(DiagnosticInfoUnsupported{
11055         F, "Argument register required, but has been reserved."});
11056 }
11057 
11058 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11059   return CI->isTailCall();
11060 }
11061 
11062 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11063 #define NODE_NAME_CASE(NODE)                                                   \
11064   case RISCVISD::NODE:                                                         \
11065     return "RISCVISD::" #NODE;
11066   // clang-format off
11067   switch ((RISCVISD::NodeType)Opcode) {
11068   case RISCVISD::FIRST_NUMBER:
11069     break;
11070   NODE_NAME_CASE(RET_FLAG)
11071   NODE_NAME_CASE(URET_FLAG)
11072   NODE_NAME_CASE(SRET_FLAG)
11073   NODE_NAME_CASE(MRET_FLAG)
11074   NODE_NAME_CASE(CALL)
11075   NODE_NAME_CASE(SELECT_CC)
11076   NODE_NAME_CASE(BR_CC)
11077   NODE_NAME_CASE(BuildPairF64)
11078   NODE_NAME_CASE(SplitF64)
11079   NODE_NAME_CASE(TAIL)
11080   NODE_NAME_CASE(MULHSU)
11081   NODE_NAME_CASE(SLLW)
11082   NODE_NAME_CASE(SRAW)
11083   NODE_NAME_CASE(SRLW)
11084   NODE_NAME_CASE(DIVW)
11085   NODE_NAME_CASE(DIVUW)
11086   NODE_NAME_CASE(REMUW)
11087   NODE_NAME_CASE(ROLW)
11088   NODE_NAME_CASE(RORW)
11089   NODE_NAME_CASE(CLZW)
11090   NODE_NAME_CASE(CTZW)
11091   NODE_NAME_CASE(FSLW)
11092   NODE_NAME_CASE(FSRW)
11093   NODE_NAME_CASE(FSL)
11094   NODE_NAME_CASE(FSR)
11095   NODE_NAME_CASE(FMV_H_X)
11096   NODE_NAME_CASE(FMV_X_ANYEXTH)
11097   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11098   NODE_NAME_CASE(FMV_W_X_RV64)
11099   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11100   NODE_NAME_CASE(FCVT_X)
11101   NODE_NAME_CASE(FCVT_XU)
11102   NODE_NAME_CASE(FCVT_W_RV64)
11103   NODE_NAME_CASE(FCVT_WU_RV64)
11104   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11105   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11106   NODE_NAME_CASE(READ_CYCLE_WIDE)
11107   NODE_NAME_CASE(GREV)
11108   NODE_NAME_CASE(GREVW)
11109   NODE_NAME_CASE(GORC)
11110   NODE_NAME_CASE(GORCW)
11111   NODE_NAME_CASE(SHFL)
11112   NODE_NAME_CASE(SHFLW)
11113   NODE_NAME_CASE(UNSHFL)
11114   NODE_NAME_CASE(UNSHFLW)
11115   NODE_NAME_CASE(BFP)
11116   NODE_NAME_CASE(BFPW)
11117   NODE_NAME_CASE(BCOMPRESS)
11118   NODE_NAME_CASE(BCOMPRESSW)
11119   NODE_NAME_CASE(BDECOMPRESS)
11120   NODE_NAME_CASE(BDECOMPRESSW)
11121   NODE_NAME_CASE(VMV_V_X_VL)
11122   NODE_NAME_CASE(VFMV_V_F_VL)
11123   NODE_NAME_CASE(VMV_X_S)
11124   NODE_NAME_CASE(VMV_S_X_VL)
11125   NODE_NAME_CASE(VFMV_S_F_VL)
11126   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11127   NODE_NAME_CASE(READ_VLENB)
11128   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11129   NODE_NAME_CASE(VSLIDEUP_VL)
11130   NODE_NAME_CASE(VSLIDE1UP_VL)
11131   NODE_NAME_CASE(VSLIDEDOWN_VL)
11132   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11133   NODE_NAME_CASE(VID_VL)
11134   NODE_NAME_CASE(VFNCVT_ROD_VL)
11135   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11136   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11137   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11138   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11139   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11140   NODE_NAME_CASE(VECREDUCE_AND_VL)
11141   NODE_NAME_CASE(VECREDUCE_OR_VL)
11142   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11143   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11144   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11145   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11146   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11147   NODE_NAME_CASE(ADD_VL)
11148   NODE_NAME_CASE(AND_VL)
11149   NODE_NAME_CASE(MUL_VL)
11150   NODE_NAME_CASE(OR_VL)
11151   NODE_NAME_CASE(SDIV_VL)
11152   NODE_NAME_CASE(SHL_VL)
11153   NODE_NAME_CASE(SREM_VL)
11154   NODE_NAME_CASE(SRA_VL)
11155   NODE_NAME_CASE(SRL_VL)
11156   NODE_NAME_CASE(SUB_VL)
11157   NODE_NAME_CASE(UDIV_VL)
11158   NODE_NAME_CASE(UREM_VL)
11159   NODE_NAME_CASE(XOR_VL)
11160   NODE_NAME_CASE(SADDSAT_VL)
11161   NODE_NAME_CASE(UADDSAT_VL)
11162   NODE_NAME_CASE(SSUBSAT_VL)
11163   NODE_NAME_CASE(USUBSAT_VL)
11164   NODE_NAME_CASE(FADD_VL)
11165   NODE_NAME_CASE(FSUB_VL)
11166   NODE_NAME_CASE(FMUL_VL)
11167   NODE_NAME_CASE(FDIV_VL)
11168   NODE_NAME_CASE(FNEG_VL)
11169   NODE_NAME_CASE(FABS_VL)
11170   NODE_NAME_CASE(FSQRT_VL)
11171   NODE_NAME_CASE(FMA_VL)
11172   NODE_NAME_CASE(FCOPYSIGN_VL)
11173   NODE_NAME_CASE(SMIN_VL)
11174   NODE_NAME_CASE(SMAX_VL)
11175   NODE_NAME_CASE(UMIN_VL)
11176   NODE_NAME_CASE(UMAX_VL)
11177   NODE_NAME_CASE(FMINNUM_VL)
11178   NODE_NAME_CASE(FMAXNUM_VL)
11179   NODE_NAME_CASE(MULHS_VL)
11180   NODE_NAME_CASE(MULHU_VL)
11181   NODE_NAME_CASE(FP_TO_SINT_VL)
11182   NODE_NAME_CASE(FP_TO_UINT_VL)
11183   NODE_NAME_CASE(SINT_TO_FP_VL)
11184   NODE_NAME_CASE(UINT_TO_FP_VL)
11185   NODE_NAME_CASE(FP_EXTEND_VL)
11186   NODE_NAME_CASE(FP_ROUND_VL)
11187   NODE_NAME_CASE(VWMUL_VL)
11188   NODE_NAME_CASE(VWMULU_VL)
11189   NODE_NAME_CASE(VWMULSU_VL)
11190   NODE_NAME_CASE(VWADD_VL)
11191   NODE_NAME_CASE(VWADDU_VL)
11192   NODE_NAME_CASE(VWSUB_VL)
11193   NODE_NAME_CASE(VWSUBU_VL)
11194   NODE_NAME_CASE(VWADD_W_VL)
11195   NODE_NAME_CASE(VWADDU_W_VL)
11196   NODE_NAME_CASE(VWSUB_W_VL)
11197   NODE_NAME_CASE(VWSUBU_W_VL)
11198   NODE_NAME_CASE(SETCC_VL)
11199   NODE_NAME_CASE(VSELECT_VL)
11200   NODE_NAME_CASE(VP_MERGE_VL)
11201   NODE_NAME_CASE(VMAND_VL)
11202   NODE_NAME_CASE(VMOR_VL)
11203   NODE_NAME_CASE(VMXOR_VL)
11204   NODE_NAME_CASE(VMCLR_VL)
11205   NODE_NAME_CASE(VMSET_VL)
11206   NODE_NAME_CASE(VRGATHER_VX_VL)
11207   NODE_NAME_CASE(VRGATHER_VV_VL)
11208   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11209   NODE_NAME_CASE(VSEXT_VL)
11210   NODE_NAME_CASE(VZEXT_VL)
11211   NODE_NAME_CASE(VCPOP_VL)
11212   NODE_NAME_CASE(READ_CSR)
11213   NODE_NAME_CASE(WRITE_CSR)
11214   NODE_NAME_CASE(SWAP_CSR)
11215   }
11216   // clang-format on
11217   return nullptr;
11218 #undef NODE_NAME_CASE
11219 }
11220 
11221 /// getConstraintType - Given a constraint letter, return the type of
11222 /// constraint it is for this target.
11223 RISCVTargetLowering::ConstraintType
11224 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11225   if (Constraint.size() == 1) {
11226     switch (Constraint[0]) {
11227     default:
11228       break;
11229     case 'f':
11230       return C_RegisterClass;
11231     case 'I':
11232     case 'J':
11233     case 'K':
11234       return C_Immediate;
11235     case 'A':
11236       return C_Memory;
11237     case 'S': // A symbolic address
11238       return C_Other;
11239     }
11240   } else {
11241     if (Constraint == "vr" || Constraint == "vm")
11242       return C_RegisterClass;
11243   }
11244   return TargetLowering::getConstraintType(Constraint);
11245 }
11246 
11247 std::pair<unsigned, const TargetRegisterClass *>
11248 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11249                                                   StringRef Constraint,
11250                                                   MVT VT) const {
11251   // First, see if this is a constraint that directly corresponds to a
11252   // RISCV register class.
11253   if (Constraint.size() == 1) {
11254     switch (Constraint[0]) {
11255     case 'r':
11256       // TODO: Support fixed vectors up to XLen for P extension?
11257       if (VT.isVector())
11258         break;
11259       return std::make_pair(0U, &RISCV::GPRRegClass);
11260     case 'f':
11261       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11262         return std::make_pair(0U, &RISCV::FPR16RegClass);
11263       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11264         return std::make_pair(0U, &RISCV::FPR32RegClass);
11265       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11266         return std::make_pair(0U, &RISCV::FPR64RegClass);
11267       break;
11268     default:
11269       break;
11270     }
11271   } else if (Constraint == "vr") {
11272     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11273                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11274       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11275         return std::make_pair(0U, RC);
11276     }
11277   } else if (Constraint == "vm") {
11278     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11279       return std::make_pair(0U, &RISCV::VMV0RegClass);
11280   }
11281 
11282   // Clang will correctly decode the usage of register name aliases into their
11283   // official names. However, other frontends like `rustc` do not. This allows
11284   // users of these frontends to use the ABI names for registers in LLVM-style
11285   // register constraints.
11286   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11287                                .Case("{zero}", RISCV::X0)
11288                                .Case("{ra}", RISCV::X1)
11289                                .Case("{sp}", RISCV::X2)
11290                                .Case("{gp}", RISCV::X3)
11291                                .Case("{tp}", RISCV::X4)
11292                                .Case("{t0}", RISCV::X5)
11293                                .Case("{t1}", RISCV::X6)
11294                                .Case("{t2}", RISCV::X7)
11295                                .Cases("{s0}", "{fp}", RISCV::X8)
11296                                .Case("{s1}", RISCV::X9)
11297                                .Case("{a0}", RISCV::X10)
11298                                .Case("{a1}", RISCV::X11)
11299                                .Case("{a2}", RISCV::X12)
11300                                .Case("{a3}", RISCV::X13)
11301                                .Case("{a4}", RISCV::X14)
11302                                .Case("{a5}", RISCV::X15)
11303                                .Case("{a6}", RISCV::X16)
11304                                .Case("{a7}", RISCV::X17)
11305                                .Case("{s2}", RISCV::X18)
11306                                .Case("{s3}", RISCV::X19)
11307                                .Case("{s4}", RISCV::X20)
11308                                .Case("{s5}", RISCV::X21)
11309                                .Case("{s6}", RISCV::X22)
11310                                .Case("{s7}", RISCV::X23)
11311                                .Case("{s8}", RISCV::X24)
11312                                .Case("{s9}", RISCV::X25)
11313                                .Case("{s10}", RISCV::X26)
11314                                .Case("{s11}", RISCV::X27)
11315                                .Case("{t3}", RISCV::X28)
11316                                .Case("{t4}", RISCV::X29)
11317                                .Case("{t5}", RISCV::X30)
11318                                .Case("{t6}", RISCV::X31)
11319                                .Default(RISCV::NoRegister);
11320   if (XRegFromAlias != RISCV::NoRegister)
11321     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11322 
11323   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11324   // TableGen record rather than the AsmName to choose registers for InlineAsm
11325   // constraints, plus we want to match those names to the widest floating point
11326   // register type available, manually select floating point registers here.
11327   //
11328   // The second case is the ABI name of the register, so that frontends can also
11329   // use the ABI names in register constraint lists.
11330   if (Subtarget.hasStdExtF()) {
11331     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11332                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11333                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11334                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11335                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11336                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11337                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11338                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11339                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11340                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11341                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11342                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11343                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11344                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11345                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11346                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11347                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11348                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11349                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11350                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11351                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11352                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11353                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11354                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11355                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11356                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11357                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11358                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11359                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11360                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11361                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11362                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11363                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11364                         .Default(RISCV::NoRegister);
11365     if (FReg != RISCV::NoRegister) {
11366       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11367       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11368         unsigned RegNo = FReg - RISCV::F0_F;
11369         unsigned DReg = RISCV::F0_D + RegNo;
11370         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11371       }
11372       if (VT == MVT::f32 || VT == MVT::Other)
11373         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11374       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11375         unsigned RegNo = FReg - RISCV::F0_F;
11376         unsigned HReg = RISCV::F0_H + RegNo;
11377         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11378       }
11379     }
11380   }
11381 
11382   if (Subtarget.hasVInstructions()) {
11383     Register VReg = StringSwitch<Register>(Constraint.lower())
11384                         .Case("{v0}", RISCV::V0)
11385                         .Case("{v1}", RISCV::V1)
11386                         .Case("{v2}", RISCV::V2)
11387                         .Case("{v3}", RISCV::V3)
11388                         .Case("{v4}", RISCV::V4)
11389                         .Case("{v5}", RISCV::V5)
11390                         .Case("{v6}", RISCV::V6)
11391                         .Case("{v7}", RISCV::V7)
11392                         .Case("{v8}", RISCV::V8)
11393                         .Case("{v9}", RISCV::V9)
11394                         .Case("{v10}", RISCV::V10)
11395                         .Case("{v11}", RISCV::V11)
11396                         .Case("{v12}", RISCV::V12)
11397                         .Case("{v13}", RISCV::V13)
11398                         .Case("{v14}", RISCV::V14)
11399                         .Case("{v15}", RISCV::V15)
11400                         .Case("{v16}", RISCV::V16)
11401                         .Case("{v17}", RISCV::V17)
11402                         .Case("{v18}", RISCV::V18)
11403                         .Case("{v19}", RISCV::V19)
11404                         .Case("{v20}", RISCV::V20)
11405                         .Case("{v21}", RISCV::V21)
11406                         .Case("{v22}", RISCV::V22)
11407                         .Case("{v23}", RISCV::V23)
11408                         .Case("{v24}", RISCV::V24)
11409                         .Case("{v25}", RISCV::V25)
11410                         .Case("{v26}", RISCV::V26)
11411                         .Case("{v27}", RISCV::V27)
11412                         .Case("{v28}", RISCV::V28)
11413                         .Case("{v29}", RISCV::V29)
11414                         .Case("{v30}", RISCV::V30)
11415                         .Case("{v31}", RISCV::V31)
11416                         .Default(RISCV::NoRegister);
11417     if (VReg != RISCV::NoRegister) {
11418       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11419         return std::make_pair(VReg, &RISCV::VMRegClass);
11420       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11421         return std::make_pair(VReg, &RISCV::VRRegClass);
11422       for (const auto *RC :
11423            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11424         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11425           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11426           return std::make_pair(VReg, RC);
11427         }
11428       }
11429     }
11430   }
11431 
11432   std::pair<Register, const TargetRegisterClass *> Res =
11433       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11434 
11435   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11436   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11437   // Subtarget into account.
11438   if (Res.second == &RISCV::GPRF16RegClass ||
11439       Res.second == &RISCV::GPRF32RegClass ||
11440       Res.second == &RISCV::GPRF64RegClass)
11441     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11442 
11443   return Res;
11444 }
11445 
11446 unsigned
11447 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11448   // Currently only support length 1 constraints.
11449   if (ConstraintCode.size() == 1) {
11450     switch (ConstraintCode[0]) {
11451     case 'A':
11452       return InlineAsm::Constraint_A;
11453     default:
11454       break;
11455     }
11456   }
11457 
11458   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11459 }
11460 
11461 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11462     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11463     SelectionDAG &DAG) const {
11464   // Currently only support length 1 constraints.
11465   if (Constraint.length() == 1) {
11466     switch (Constraint[0]) {
11467     case 'I':
11468       // Validate & create a 12-bit signed immediate operand.
11469       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11470         uint64_t CVal = C->getSExtValue();
11471         if (isInt<12>(CVal))
11472           Ops.push_back(
11473               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11474       }
11475       return;
11476     case 'J':
11477       // Validate & create an integer zero operand.
11478       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11479         if (C->getZExtValue() == 0)
11480           Ops.push_back(
11481               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11482       return;
11483     case 'K':
11484       // Validate & create a 5-bit unsigned immediate operand.
11485       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11486         uint64_t CVal = C->getZExtValue();
11487         if (isUInt<5>(CVal))
11488           Ops.push_back(
11489               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11490       }
11491       return;
11492     case 'S':
11493       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11494         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11495                                                  GA->getValueType(0)));
11496       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11497         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11498                                                 BA->getValueType(0)));
11499       }
11500       return;
11501     default:
11502       break;
11503     }
11504   }
11505   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11506 }
11507 
11508 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11509                                                    Instruction *Inst,
11510                                                    AtomicOrdering Ord) const {
11511   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11512     return Builder.CreateFence(Ord);
11513   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11514     return Builder.CreateFence(AtomicOrdering::Release);
11515   return nullptr;
11516 }
11517 
11518 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11519                                                     Instruction *Inst,
11520                                                     AtomicOrdering Ord) const {
11521   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11522     return Builder.CreateFence(AtomicOrdering::Acquire);
11523   return nullptr;
11524 }
11525 
11526 TargetLowering::AtomicExpansionKind
11527 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11528   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11529   // point operations can't be used in an lr/sc sequence without breaking the
11530   // forward-progress guarantee.
11531   if (AI->isFloatingPointOperation())
11532     return AtomicExpansionKind::CmpXChg;
11533 
11534   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11535   if (Size == 8 || Size == 16)
11536     return AtomicExpansionKind::MaskedIntrinsic;
11537   return AtomicExpansionKind::None;
11538 }
11539 
11540 static Intrinsic::ID
11541 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11542   if (XLen == 32) {
11543     switch (BinOp) {
11544     default:
11545       llvm_unreachable("Unexpected AtomicRMW BinOp");
11546     case AtomicRMWInst::Xchg:
11547       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11548     case AtomicRMWInst::Add:
11549       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11550     case AtomicRMWInst::Sub:
11551       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11552     case AtomicRMWInst::Nand:
11553       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11554     case AtomicRMWInst::Max:
11555       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11556     case AtomicRMWInst::Min:
11557       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11558     case AtomicRMWInst::UMax:
11559       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11560     case AtomicRMWInst::UMin:
11561       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11562     }
11563   }
11564 
11565   if (XLen == 64) {
11566     switch (BinOp) {
11567     default:
11568       llvm_unreachable("Unexpected AtomicRMW BinOp");
11569     case AtomicRMWInst::Xchg:
11570       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11571     case AtomicRMWInst::Add:
11572       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11573     case AtomicRMWInst::Sub:
11574       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11575     case AtomicRMWInst::Nand:
11576       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11577     case AtomicRMWInst::Max:
11578       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11579     case AtomicRMWInst::Min:
11580       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11581     case AtomicRMWInst::UMax:
11582       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11583     case AtomicRMWInst::UMin:
11584       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11585     }
11586   }
11587 
11588   llvm_unreachable("Unexpected XLen\n");
11589 }
11590 
11591 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11592     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11593     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11594   unsigned XLen = Subtarget.getXLen();
11595   Value *Ordering =
11596       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11597   Type *Tys[] = {AlignedAddr->getType()};
11598   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11599       AI->getModule(),
11600       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11601 
11602   if (XLen == 64) {
11603     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11604     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11605     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11606   }
11607 
11608   Value *Result;
11609 
11610   // Must pass the shift amount needed to sign extend the loaded value prior
11611   // to performing a signed comparison for min/max. ShiftAmt is the number of
11612   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11613   // is the number of bits to left+right shift the value in order to
11614   // sign-extend.
11615   if (AI->getOperation() == AtomicRMWInst::Min ||
11616       AI->getOperation() == AtomicRMWInst::Max) {
11617     const DataLayout &DL = AI->getModule()->getDataLayout();
11618     unsigned ValWidth =
11619         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11620     Value *SextShamt =
11621         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11622     Result = Builder.CreateCall(LrwOpScwLoop,
11623                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11624   } else {
11625     Result =
11626         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11627   }
11628 
11629   if (XLen == 64)
11630     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11631   return Result;
11632 }
11633 
11634 TargetLowering::AtomicExpansionKind
11635 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11636     AtomicCmpXchgInst *CI) const {
11637   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11638   if (Size == 8 || Size == 16)
11639     return AtomicExpansionKind::MaskedIntrinsic;
11640   return AtomicExpansionKind::None;
11641 }
11642 
11643 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11644     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11645     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11646   unsigned XLen = Subtarget.getXLen();
11647   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11648   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11649   if (XLen == 64) {
11650     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11651     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11652     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11653     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11654   }
11655   Type *Tys[] = {AlignedAddr->getType()};
11656   Function *MaskedCmpXchg =
11657       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11658   Value *Result = Builder.CreateCall(
11659       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11660   if (XLen == 64)
11661     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11662   return Result;
11663 }
11664 
11665 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11666   return false;
11667 }
11668 
11669 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11670                                                EVT VT) const {
11671   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11672     return false;
11673 
11674   switch (FPVT.getSimpleVT().SimpleTy) {
11675   case MVT::f16:
11676     return Subtarget.hasStdExtZfh();
11677   case MVT::f32:
11678     return Subtarget.hasStdExtF();
11679   case MVT::f64:
11680     return Subtarget.hasStdExtD();
11681   default:
11682     return false;
11683   }
11684 }
11685 
11686 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11687   // If we are using the small code model, we can reduce size of jump table
11688   // entry to 4 bytes.
11689   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11690       getTargetMachine().getCodeModel() == CodeModel::Small) {
11691     return MachineJumpTableInfo::EK_Custom32;
11692   }
11693   return TargetLowering::getJumpTableEncoding();
11694 }
11695 
11696 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11697     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11698     unsigned uid, MCContext &Ctx) const {
11699   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11700          getTargetMachine().getCodeModel() == CodeModel::Small);
11701   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11702 }
11703 
11704 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11705                                                      EVT VT) const {
11706   VT = VT.getScalarType();
11707 
11708   if (!VT.isSimple())
11709     return false;
11710 
11711   switch (VT.getSimpleVT().SimpleTy) {
11712   case MVT::f16:
11713     return Subtarget.hasStdExtZfh();
11714   case MVT::f32:
11715     return Subtarget.hasStdExtF();
11716   case MVT::f64:
11717     return Subtarget.hasStdExtD();
11718   default:
11719     break;
11720   }
11721 
11722   return false;
11723 }
11724 
11725 Register RISCVTargetLowering::getExceptionPointerRegister(
11726     const Constant *PersonalityFn) const {
11727   return RISCV::X10;
11728 }
11729 
11730 Register RISCVTargetLowering::getExceptionSelectorRegister(
11731     const Constant *PersonalityFn) const {
11732   return RISCV::X11;
11733 }
11734 
11735 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11736   // Return false to suppress the unnecessary extensions if the LibCall
11737   // arguments or return value is f32 type for LP64 ABI.
11738   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11739   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11740     return false;
11741 
11742   return true;
11743 }
11744 
11745 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11746   if (Subtarget.is64Bit() && Type == MVT::i32)
11747     return true;
11748 
11749   return IsSigned;
11750 }
11751 
11752 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11753                                                  SDValue C) const {
11754   // Check integral scalar types.
11755   if (VT.isScalarInteger()) {
11756     // Omit the optimization if the sub target has the M extension and the data
11757     // size exceeds XLen.
11758     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11759       return false;
11760     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11761       // Break the MUL to a SLLI and an ADD/SUB.
11762       const APInt &Imm = ConstNode->getAPIntValue();
11763       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11764           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11765         return true;
11766       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11767       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11768           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11769            (Imm - 8).isPowerOf2()))
11770         return true;
11771       // Omit the following optimization if the sub target has the M extension
11772       // and the data size >= XLen.
11773       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11774         return false;
11775       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11776       // a pair of LUI/ADDI.
11777       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11778         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11779         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11780             (1 - ImmS).isPowerOf2())
11781         return true;
11782       }
11783     }
11784   }
11785 
11786   return false;
11787 }
11788 
11789 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11790                                                       SDValue ConstNode) const {
11791   // Let the DAGCombiner decide for vectors.
11792   EVT VT = AddNode.getValueType();
11793   if (VT.isVector())
11794     return true;
11795 
11796   // Let the DAGCombiner decide for larger types.
11797   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11798     return true;
11799 
11800   // It is worse if c1 is simm12 while c1*c2 is not.
11801   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11802   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11803   const APInt &C1 = C1Node->getAPIntValue();
11804   const APInt &C2 = C2Node->getAPIntValue();
11805   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11806     return false;
11807 
11808   // Default to true and let the DAGCombiner decide.
11809   return true;
11810 }
11811 
11812 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11813     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11814     bool *Fast) const {
11815   if (!VT.isVector())
11816     return false;
11817 
11818   EVT ElemVT = VT.getVectorElementType();
11819   if (Alignment >= ElemVT.getStoreSize()) {
11820     if (Fast)
11821       *Fast = true;
11822     return true;
11823   }
11824 
11825   return false;
11826 }
11827 
11828 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11829     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11830     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11831   bool IsABIRegCopy = CC.hasValue();
11832   EVT ValueVT = Val.getValueType();
11833   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11834     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11835     // and cast to f32.
11836     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11837     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11838     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11839                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11840     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11841     Parts[0] = Val;
11842     return true;
11843   }
11844 
11845   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11846     LLVMContext &Context = *DAG.getContext();
11847     EVT ValueEltVT = ValueVT.getVectorElementType();
11848     EVT PartEltVT = PartVT.getVectorElementType();
11849     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11850     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11851     if (PartVTBitSize % ValueVTBitSize == 0) {
11852       assert(PartVTBitSize >= ValueVTBitSize);
11853       // If the element types are different, bitcast to the same element type of
11854       // PartVT first.
11855       // Give an example here, we want copy a <vscale x 1 x i8> value to
11856       // <vscale x 4 x i16>.
11857       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11858       // subvector, then we can bitcast to <vscale x 4 x i16>.
11859       if (ValueEltVT != PartEltVT) {
11860         if (PartVTBitSize > ValueVTBitSize) {
11861           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11862           assert(Count != 0 && "The number of element should not be zero.");
11863           EVT SameEltTypeVT =
11864               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11865           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11866                             DAG.getUNDEF(SameEltTypeVT), Val,
11867                             DAG.getVectorIdxConstant(0, DL));
11868         }
11869         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11870       } else {
11871         Val =
11872             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11873                         Val, DAG.getVectorIdxConstant(0, DL));
11874       }
11875       Parts[0] = Val;
11876       return true;
11877     }
11878   }
11879   return false;
11880 }
11881 
11882 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11883     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11884     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11885   bool IsABIRegCopy = CC.hasValue();
11886   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11887     SDValue Val = Parts[0];
11888 
11889     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11890     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11891     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11892     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11893     return Val;
11894   }
11895 
11896   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11897     LLVMContext &Context = *DAG.getContext();
11898     SDValue Val = Parts[0];
11899     EVT ValueEltVT = ValueVT.getVectorElementType();
11900     EVT PartEltVT = PartVT.getVectorElementType();
11901     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11902     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11903     if (PartVTBitSize % ValueVTBitSize == 0) {
11904       assert(PartVTBitSize >= ValueVTBitSize);
11905       EVT SameEltTypeVT = ValueVT;
11906       // If the element types are different, convert it to the same element type
11907       // of PartVT.
11908       // Give an example here, we want copy a <vscale x 1 x i8> value from
11909       // <vscale x 4 x i16>.
11910       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11911       // then we can extract <vscale x 1 x i8>.
11912       if (ValueEltVT != PartEltVT) {
11913         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11914         assert(Count != 0 && "The number of element should not be zero.");
11915         SameEltTypeVT =
11916             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11917         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11918       }
11919       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11920                         DAG.getVectorIdxConstant(0, DL));
11921       return Val;
11922     }
11923   }
11924   return SDValue();
11925 }
11926 
11927 SDValue
11928 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11929                                    SelectionDAG &DAG,
11930                                    SmallVectorImpl<SDNode *> &Created) const {
11931   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11932   if (isIntDivCheap(N->getValueType(0), Attr))
11933     return SDValue(N, 0); // Lower SDIV as SDIV
11934 
11935   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11936          "Unexpected divisor!");
11937 
11938   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11939   if (!Subtarget.hasStdExtZbt())
11940     return SDValue();
11941 
11942   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11943   // Besides, more critical path instructions will be generated when dividing
11944   // by 2. So we keep using the original DAGs for these cases.
11945   unsigned Lg2 = Divisor.countTrailingZeros();
11946   if (Lg2 == 1 || Lg2 >= 12)
11947     return SDValue();
11948 
11949   // fold (sdiv X, pow2)
11950   EVT VT = N->getValueType(0);
11951   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11952     return SDValue();
11953 
11954   SDLoc DL(N);
11955   SDValue N0 = N->getOperand(0);
11956   SDValue Zero = DAG.getConstant(0, DL, VT);
11957   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11958 
11959   // Add (N0 < 0) ? Pow2 - 1 : 0;
11960   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11961   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11962   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11963 
11964   Created.push_back(Cmp.getNode());
11965   Created.push_back(Add.getNode());
11966   Created.push_back(Sel.getNode());
11967 
11968   // Divide by pow2.
11969   SDValue SRA =
11970       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11971 
11972   // If we're dividing by a positive value, we're done.  Otherwise, we must
11973   // negate the result.
11974   if (Divisor.isNonNegative())
11975     return SRA;
11976 
11977   Created.push_back(SRA.getNode());
11978   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11979 }
11980 
11981 #define GET_REGISTER_MATCHER
11982 #include "RISCVGenAsmMatcher.inc"
11983 
11984 Register
11985 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11986                                        const MachineFunction &MF) const {
11987   Register Reg = MatchRegisterAltName(RegName);
11988   if (Reg == RISCV::NoRegister)
11989     Reg = MatchRegisterName(RegName);
11990   if (Reg == RISCV::NoRegister)
11991     report_fatal_error(
11992         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11993   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11994   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11995     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11996                              StringRef(RegName) + "\"."));
11997   return Reg;
11998 }
11999 
12000 namespace llvm {
12001 namespace RISCVVIntrinsicsTable {
12002 
12003 #define GET_RISCVVIntrinsicsTable_IMPL
12004 #include "RISCVGenSearchableTables.inc"
12005 
12006 } // namespace RISCVVIntrinsicsTable
12007 
12008 } // namespace llvm
12009