1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
174                    MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
185 
186   setOperationAction(ISD::VASTART, MVT::Other, Custom);
187   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
188 
189   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
190   if (!Subtarget.hasStdExtZbb())
191     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
192 
193   if (Subtarget.is64Bit()) {
194     setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
195                        MVT::i32, Custom);
196 
197     setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
198                        MVT::i32, Custom);
199   } else {
200     setLibcallName(
201         {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
202         nullptr);
203     setLibcallName(RTLIB::MULO_I64, nullptr);
204   }
205 
206   if (!Subtarget.hasStdExtM()) {
207     setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
208                         ISD::SREM, ISD::UREM},
209                        XLenVT, Expand);
210   } else {
211     if (Subtarget.is64Bit()) {
212       setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);
213 
214       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
215                          {MVT::i8, MVT::i16, MVT::i32}, Custom);
216     } else {
217       setOperationAction(ISD::MUL, MVT::i64, Custom);
218     }
219   }
220 
221   setOperationAction(
222       {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
223       Expand);
224 
225   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
226                      Custom);
227 
228   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
229       Subtarget.hasStdExtZbkb()) {
230     if (Subtarget.is64Bit())
231       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
232   } else {
233     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
234   }
235 
236   if (Subtarget.hasStdExtZbp()) {
237     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
238     // more combining.
239     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);
240 
241     // BSWAP i8 doesn't exist.
242     setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
243 
244     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);
245 
246     if (Subtarget.is64Bit())
247       setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
248   } else {
249     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
250     // pattern match it directly in isel.
251     setOperationAction(ISD::BSWAP, XLenVT,
252                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
253                            ? Legal
254                            : Expand);
255     // Zbkb can use rev8+brev8 to implement bitreverse.
256     setOperationAction(ISD::BITREVERSE, XLenVT,
257                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
258   }
259 
260   if (Subtarget.hasStdExtZbb()) {
261     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
262                        Legal);
263 
264     if (Subtarget.is64Bit())
265       setOperationAction(
266           {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
267           MVT::i32, Custom);
268   } else {
269     setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);
270 
271     if (Subtarget.is64Bit())
272       setOperationAction(ISD::ABS, MVT::i32, Custom);
273   }
274 
275   if (Subtarget.hasStdExtZbt()) {
276     setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
277     setOperationAction(ISD::SELECT, XLenVT, Legal);
278 
279     if (Subtarget.is64Bit())
280       setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
281   } else {
282     setOperationAction(ISD::SELECT, XLenVT, Custom);
283   }
284 
285   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
286       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
287       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
288       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
289       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
290       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
291       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
292 
293   static const ISD::CondCode FPCCToExpand[] = {
294       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
295       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
296       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
297 
298   static const ISD::NodeType FPOpToExpand[] = {
299       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
300       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
301 
302   if (Subtarget.hasStdExtZfh())
303     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
304 
305   if (Subtarget.hasStdExtZfh()) {
306     for (auto NT : FPLegalNodeTypes)
307       setOperationAction(NT, MVT::f16, Legal);
308     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
309     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
310     for (auto CC : FPCCToExpand)
311       setCondCodeAction(CC, MVT::f16, Expand);
312     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
313     setOperationAction(ISD::SELECT, MVT::f16, Custom);
314     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
315 
316     setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
317                         ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
318                         ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
319                         ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
320                         ISD::FLOG2, ISD::FLOG10},
321                        MVT::f16, Promote);
322 
323     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
324     // complete support for all operations in LegalizeDAG.
325 
326     // We need to custom promote this.
327     if (Subtarget.is64Bit())
328       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
329   }
330 
331   if (Subtarget.hasStdExtF()) {
332     for (auto NT : FPLegalNodeTypes)
333       setOperationAction(NT, MVT::f32, Legal);
334     for (auto CC : FPCCToExpand)
335       setCondCodeAction(CC, MVT::f32, Expand);
336     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
337     setOperationAction(ISD::SELECT, MVT::f32, Custom);
338     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
339     for (auto Op : FPOpToExpand)
340       setOperationAction(Op, MVT::f32, Expand);
341     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
342     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
343   }
344 
345   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
346     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
347 
348   if (Subtarget.hasStdExtD()) {
349     for (auto NT : FPLegalNodeTypes)
350       setOperationAction(NT, MVT::f64, Legal);
351     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
352     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
353     for (auto CC : FPCCToExpand)
354       setCondCodeAction(CC, MVT::f64, Expand);
355     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
356     setOperationAction(ISD::SELECT, MVT::f64, Custom);
357     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
358     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
359     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
360     for (auto Op : FPOpToExpand)
361       setOperationAction(Op, MVT::f64, Expand);
362     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
363     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
364   }
365 
366   if (Subtarget.is64Bit())
367     setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
368                         ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
369                        MVT::i32, Custom);
370 
371   if (Subtarget.hasStdExtF()) {
372     setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
373                        Custom);
374 
375     setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
376                         ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
377                        XLenVT, Legal);
378 
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
384                       ISD::JumpTable},
385                      XLenVT, Custom);
386 
387   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
388 
389   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
390   // Unfortunately this can't be determined just from the ISA naming string.
391   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
392                      Subtarget.is64Bit() ? Legal : Custom);
393 
394   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
395   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
396   if (Subtarget.is64Bit())
397     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
398 
399   if (Subtarget.hasStdExtA()) {
400     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
401     setMinCmpXchgSizeInBits(32);
402   } else {
403     setMaxAtomicSizeInBitsSupported(0);
404   }
405 
406   setBooleanContents(ZeroOrOneBooleanContent);
407 
408   if (Subtarget.hasVInstructions()) {
409     setBooleanVectorContents(ZeroOrOneBooleanContent);
410 
411     setOperationAction(ISD::VSCALE, XLenVT, Custom);
412 
413     // RVV intrinsics may have illegal operands.
414     // We also need to custom legalize vmv.x.s.
415     setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
416                        {MVT::i8, MVT::i16}, Custom);
417     if (Subtarget.is64Bit())
418       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
419     else
420       setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
421                          MVT::i64, Custom);
422 
423     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
424                        MVT::Other, Custom);
425 
426     static const unsigned IntegerVPOps[] = {
427         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
428         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
429         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
430         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
431         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
432         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
433         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
434         ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
435         ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
436         ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};
437 
438     static const unsigned FloatingPointVPOps[] = {
439         ISD::VP_FADD,        ISD::VP_FSUB,
440         ISD::VP_FMUL,        ISD::VP_FDIV,
441         ISD::VP_FNEG,        ISD::VP_FMA,
442         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
443         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
444         ISD::VP_MERGE,       ISD::VP_SELECT,
445         ISD::VP_SITOFP,      ISD::VP_UITOFP,
446         ISD::VP_SETCC,       ISD::VP_FP_ROUND,
447         ISD::VP_FP_EXTEND};
448 
449     if (!Subtarget.is64Bit()) {
450       // We must custom-lower certain vXi64 operations on RV32 due to the vector
451       // element type being illegal.
452       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
453                          MVT::i64, Custom);
454 
455       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
456                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
457                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
458                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
459                          MVT::i64, Custom);
460 
461       setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
462                           ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
463                           ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
464                           ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
465                          MVT::i64, Custom);
466     }
467 
468     for (MVT VT : BoolVecVTs) {
469       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
470 
471       // Mask VTs are custom-expanded into a series of standard nodes
472       setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
473                           ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
474                          VT, Custom);
475 
476       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
477                          Custom);
478 
479       setOperationAction(ISD::SELECT, VT, Custom);
480       setOperationAction(
481           {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
482           Expand);
483 
484       setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
485 
486       setOperationAction(
487           {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
488           Custom);
489 
490       setOperationAction(
491           {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
492           Custom);
493 
494       // RVV has native int->float & float->int conversions where the
495       // element type sizes are within one power-of-two of each other. Any
496       // wider distances between type sizes have to be lowered as sequences
497       // which progressively narrow the gap in stages.
498       setOperationAction(
499           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
500           VT, Custom);
501 
502       // Expand all extending loads to types larger than this, and truncating
503       // stores from types larger than this.
504       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
505         setTruncStoreAction(OtherVT, VT, Expand);
506         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
507                          VT, Expand);
508       }
509 
510       setOperationAction(
511           {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
512           Custom);
513     }
514 
515     for (MVT VT : IntVecVTs) {
516       if (VT.getVectorElementType() == MVT::i64 &&
517           !Subtarget.hasVInstructionsI64())
518         continue;
519 
520       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
521       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
522 
523       // Vectors implement MULHS/MULHU.
524       setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
525 
526       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
527       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
528         setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
529 
530       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
531                          Legal);
532 
533       setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
534 
535       setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
536                          Expand);
537 
538       setOperationAction(ISD::BSWAP, VT, Expand);
539 
540       // Custom-lower extensions and truncations from/to mask types.
541       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
542                          VT, Custom);
543 
544       // RVV has native int->float & float->int conversions where the
545       // element type sizes are within one power-of-two of each other. Any
546       // wider distances between type sizes have to be lowered as sequences
547       // which progressively narrow the gap in stages.
548       setOperationAction(
549           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
550           VT, Custom);
551 
552       setOperationAction(
553           {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
554 
555       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
556       // nodes which truncate by one power of two at a time.
557       setOperationAction(ISD::TRUNCATE, VT, Custom);
558 
559       // Custom-lower insert/extract operations to simplify patterns.
560       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
561                          Custom);
562 
563       // Custom-lower reduction operations to set up the corresponding custom
564       // nodes' operands.
565       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
566                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
567                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
568                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
569                          VT, Custom);
570 
571       for (unsigned VPOpc : IntegerVPOps)
572         setOperationAction(VPOpc, VT, Custom);
573 
574       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
575 
576       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
577                          VT, Custom);
578 
579       setOperationAction(
580           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
581           Custom);
582 
583       setOperationAction(
584           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
585           VT, Custom);
586 
587       setOperationAction(ISD::SELECT, VT, Custom);
588       setOperationAction(ISD::SELECT_CC, VT, Expand);
589 
590       setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
591 
592       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
593         setTruncStoreAction(VT, OtherVT, Expand);
594         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
595                          VT, Expand);
596       }
597 
598       // Splice
599       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
600 
601       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
602       // type that can represent the value exactly.
603       if (VT.getVectorElementType() != MVT::i64) {
604         MVT FloatEltVT =
605             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
606         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
607         if (isTypeLegal(FloatVT)) {
608           setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
609                              Custom);
610         }
611       }
612     }
613 
614     // Expand various CCs to best match the RVV ISA, which natively supports UNE
615     // but no other unordered comparisons, and supports all ordered comparisons
616     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
617     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
618     // and we pattern-match those back to the "original", swapping operands once
619     // more. This way we catch both operations and both "vf" and "fv" forms with
620     // fewer patterns.
621     static const ISD::CondCode VFPCCToExpand[] = {
622         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
623         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
624         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
625     };
626 
627     // Sets common operation actions on RVV floating-point vector types.
628     const auto SetCommonVFPActions = [&](MVT VT) {
629       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
630       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
631       // sizes are within one power-of-two of each other. Therefore conversions
632       // between vXf16 and vXf64 must be lowered as sequences which convert via
633       // vXf32.
634       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
635       // Custom-lower insert/extract operations to simplify patterns.
636       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
637                          Custom);
638       // Expand various condition codes (explained above).
639       for (auto CC : VFPCCToExpand)
640         setCondCodeAction(CC, VT, Expand);
641 
642       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
643 
644       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
645                          VT, Custom);
646 
647       setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
648                           ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
649                          VT, Custom);
650 
651       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
652 
653       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
654 
655       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
656                          VT, Custom);
657 
658       setOperationAction(
659           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
660           Custom);
661 
662       setOperationAction(ISD::SELECT, VT, Custom);
663       setOperationAction(ISD::SELECT_CC, VT, Expand);
664 
665       setOperationAction(
666           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
667           VT, Custom);
668 
669       setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
670 
671       for (unsigned VPOpc : FloatingPointVPOps)
672         setOperationAction(VPOpc, VT, Custom);
673     };
674 
675     // Sets common extload/truncstore actions on RVV floating-point vector
676     // types.
677     const auto SetCommonVFPExtLoadTruncStoreActions =
678         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
679           for (auto SmallVT : SmallerVTs) {
680             setTruncStoreAction(VT, SmallVT, Expand);
681             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
682           }
683         };
684 
685     if (Subtarget.hasVInstructionsF16())
686       for (MVT VT : F16VecVTs)
687         SetCommonVFPActions(VT);
688 
689     for (MVT VT : F32VecVTs) {
690       if (Subtarget.hasVInstructionsF32())
691         SetCommonVFPActions(VT);
692       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
693     }
694 
695     for (MVT VT : F64VecVTs) {
696       if (Subtarget.hasVInstructionsF64())
697         SetCommonVFPActions(VT);
698       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
699       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
700     }
701 
702     if (Subtarget.useRVVForFixedLengthVectors()) {
703       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
704         if (!useRVVForFixedLengthVectorVT(VT))
705           continue;
706 
707         // By default everything must be expanded.
708         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
709           setOperationAction(Op, VT, Expand);
710         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
711           setTruncStoreAction(VT, OtherVT, Expand);
712           setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
713                            OtherVT, VT, Expand);
714         }
715 
716         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
717         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
718                            Custom);
719 
720         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
721                            Custom);
722 
723         setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
724                            VT, Custom);
725 
726         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
727 
728         setOperationAction(ISD::SETCC, VT, Custom);
729 
730         setOperationAction(ISD::SELECT, VT, Custom);
731 
732         setOperationAction(ISD::TRUNCATE, VT, Custom);
733 
734         setOperationAction(ISD::BITCAST, VT, Custom);
735 
736         setOperationAction(
737             {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
738             Custom);
739 
740         setOperationAction(
741             {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
742             Custom);
743 
744         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
745                             ISD::FP_TO_UINT},
746                            VT, Custom);
747 
748         // Operations below are different for between masks and other vectors.
749         if (VT.getVectorElementType() == MVT::i1) {
750           setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
751                               ISD::OR, ISD::XOR},
752                              VT, Custom);
753 
754           setOperationAction(
755               {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
756               VT, Custom);
757           continue;
758         }
759 
760         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
761         // it before type legalization for i64 vectors on RV32. It will then be
762         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
763         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
764         // improvements first.
765         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
766           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
767           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
768         }
769 
770         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
771         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
772 
773         setOperationAction(
774             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
775 
776         setOperationAction(
777             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
778             Custom);
779 
780         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
781                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
782                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
783                            VT, Custom);
784 
785         setOperationAction(
786             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
787 
788         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
789         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
790           setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
791 
792         setOperationAction(
793             {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
794             Custom);
795 
796         setOperationAction(ISD::VSELECT, VT, Custom);
797         setOperationAction(ISD::SELECT_CC, VT, Expand);
798 
799         setOperationAction(
800             {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
801 
802         // Custom-lower reduction operations to set up the corresponding custom
803         // nodes' operands.
804         setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
805                             ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
806                             ISD::VECREDUCE_UMIN},
807                            VT, Custom);
808 
809         for (unsigned VPOpc : IntegerVPOps)
810           setOperationAction(VPOpc, VT, Custom);
811 
812         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
813         // type that can represent the value exactly.
814         if (VT.getVectorElementType() != MVT::i64) {
815           MVT FloatEltVT =
816               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
817           EVT FloatVT =
818               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
819           if (isTypeLegal(FloatVT))
820             setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
821                                Custom);
822         }
823       }
824 
825       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
826         if (!useRVVForFixedLengthVectorVT(VT))
827           continue;
828 
829         // By default everything must be expanded.
830         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
831           setOperationAction(Op, VT, Expand);
832         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
833           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
834           setTruncStoreAction(VT, OtherVT, Expand);
835         }
836 
837         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
838         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
839                            Custom);
840 
841         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
842                             ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
843                             ISD::EXTRACT_VECTOR_ELT},
844                            VT, Custom);
845 
846         setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
847                             ISD::MGATHER, ISD::MSCATTER},
848                            VT, Custom);
849 
850         setOperationAction(
851             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
852             Custom);
853 
854         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
855                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
856                             ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
857                            VT, Custom);
858 
859         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
860 
861         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
862                            VT, Custom);
863 
864         for (auto CC : VFPCCToExpand)
865           setCondCodeAction(CC, VT, Expand);
866 
867         setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
868         setOperationAction(ISD::SELECT_CC, VT, Expand);
869 
870         setOperationAction(ISD::BITCAST, VT, Custom);
871 
872         setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
873                             ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
874                            VT, Custom);
875 
876         for (unsigned VPOpc : FloatingPointVPOps)
877           setOperationAction(VPOpc, VT, Custom);
878       }
879 
880       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
881       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
882                          Custom);
883       if (Subtarget.hasStdExtZfh())
884         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
885       if (Subtarget.hasStdExtF())
886         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
887       if (Subtarget.hasStdExtD())
888         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
889     }
890   }
891 
892   // Function alignments.
893   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
894   setMinFunctionAlignment(FunctionAlignment);
895   setPrefFunctionAlignment(FunctionAlignment);
896 
897   setMinimumJumpTableEntries(5);
898 
899   // Jumps are expensive, compared to logic
900   setJumpIsExpensive();
901 
902   setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
903                        ISD::OR, ISD::XOR});
904 
905   if (Subtarget.hasStdExtF())
906     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
907 
908   if (Subtarget.hasStdExtZbp())
909     setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
910 
911   if (Subtarget.hasStdExtZbb())
912     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
913 
914   if (Subtarget.hasStdExtZbkb())
915     setTargetDAGCombine(ISD::BITREVERSE);
916   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
917     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
918   if (Subtarget.hasStdExtF())
919     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
920                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
921   if (Subtarget.hasVInstructions())
922     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
923                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
924                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
925 
926   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
927   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
928 }
929 
930 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
931                                             LLVMContext &Context,
932                                             EVT VT) const {
933   if (!VT.isVector())
934     return getPointerTy(DL);
935   if (Subtarget.hasVInstructions() &&
936       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
937     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
938   return VT.changeVectorElementTypeToInteger();
939 }
940 
941 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
942   return Subtarget.getXLenVT();
943 }
944 
945 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
946                                              const CallInst &I,
947                                              MachineFunction &MF,
948                                              unsigned Intrinsic) const {
949   auto &DL = I.getModule()->getDataLayout();
950   switch (Intrinsic) {
951   default:
952     return false;
953   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
954   case Intrinsic::riscv_masked_atomicrmw_add_i32:
955   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
956   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
957   case Intrinsic::riscv_masked_atomicrmw_max_i32:
958   case Intrinsic::riscv_masked_atomicrmw_min_i32:
959   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
960   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
961   case Intrinsic::riscv_masked_cmpxchg_i32:
962     Info.opc = ISD::INTRINSIC_W_CHAIN;
963     Info.memVT = MVT::i32;
964     Info.ptrVal = I.getArgOperand(0);
965     Info.offset = 0;
966     Info.align = Align(4);
967     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
968                  MachineMemOperand::MOVolatile;
969     return true;
970   case Intrinsic::riscv_masked_strided_load:
971     Info.opc = ISD::INTRINSIC_W_CHAIN;
972     Info.ptrVal = I.getArgOperand(1);
973     Info.memVT = getValueType(DL, I.getType()->getScalarType());
974     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
975     Info.size = MemoryLocation::UnknownSize;
976     Info.flags |= MachineMemOperand::MOLoad;
977     return true;
978   case Intrinsic::riscv_masked_strided_store:
979     Info.opc = ISD::INTRINSIC_VOID;
980     Info.ptrVal = I.getArgOperand(1);
981     Info.memVT =
982         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
983     Info.align = Align(
984         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
985         8);
986     Info.size = MemoryLocation::UnknownSize;
987     Info.flags |= MachineMemOperand::MOStore;
988     return true;
989   case Intrinsic::riscv_seg2_load:
990   case Intrinsic::riscv_seg3_load:
991   case Intrinsic::riscv_seg4_load:
992   case Intrinsic::riscv_seg5_load:
993   case Intrinsic::riscv_seg6_load:
994   case Intrinsic::riscv_seg7_load:
995   case Intrinsic::riscv_seg8_load:
996     Info.opc = ISD::INTRINSIC_W_CHAIN;
997     Info.ptrVal = I.getArgOperand(0);
998     Info.memVT =
999         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
1000     Info.align =
1001         Align(DL.getTypeSizeInBits(
1002                   I.getType()->getStructElementType(0)->getScalarType()) /
1003               8);
1004     Info.size = MemoryLocation::UnknownSize;
1005     Info.flags |= MachineMemOperand::MOLoad;
1006     return true;
1007   }
1008 }
1009 
1010 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1011                                                 const AddrMode &AM, Type *Ty,
1012                                                 unsigned AS,
1013                                                 Instruction *I) const {
1014   // No global is ever allowed as a base.
1015   if (AM.BaseGV)
1016     return false;
1017 
1018   // RVV instructions only support register addressing.
1019   if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
1020     return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
1021 
1022   // Require a 12-bit signed offset.
1023   if (!isInt<12>(AM.BaseOffs))
1024     return false;
1025 
1026   switch (AM.Scale) {
1027   case 0: // "r+i" or just "i", depending on HasBaseReg.
1028     break;
1029   case 1:
1030     if (!AM.HasBaseReg) // allow "r+i".
1031       break;
1032     return false; // disallow "r+r" or "r+r+i".
1033   default:
1034     return false;
1035   }
1036 
1037   return true;
1038 }
1039 
1040 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1041   return isInt<12>(Imm);
1042 }
1043 
1044 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1045   return isInt<12>(Imm);
1046 }
1047 
1048 // On RV32, 64-bit integers are split into their high and low parts and held
1049 // in two different registers, so the trunc is free since the low register can
1050 // just be used.
1051 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1052   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1053     return false;
1054   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1055   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1056   return (SrcBits == 64 && DestBits == 32);
1057 }
1058 
1059 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1060   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1061       !SrcVT.isInteger() || !DstVT.isInteger())
1062     return false;
1063   unsigned SrcBits = SrcVT.getSizeInBits();
1064   unsigned DestBits = DstVT.getSizeInBits();
1065   return (SrcBits == 64 && DestBits == 32);
1066 }
1067 
1068 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1069   // Zexts are free if they can be combined with a load.
1070   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1071   // poorly with type legalization of compares preferring sext.
1072   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1073     EVT MemVT = LD->getMemoryVT();
1074     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1075         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1076          LD->getExtensionType() == ISD::ZEXTLOAD))
1077       return true;
1078   }
1079 
1080   return TargetLowering::isZExtFree(Val, VT2);
1081 }
1082 
1083 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1084   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1085 }
1086 
1087 bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
1088   return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
1089 }
1090 
1091 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1092   return Subtarget.hasStdExtZbb();
1093 }
1094 
1095 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1096   return Subtarget.hasStdExtZbb();
1097 }
1098 
1099 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1100   EVT VT = Y.getValueType();
1101 
1102   // FIXME: Support vectors once we have tests.
1103   if (VT.isVector())
1104     return false;
1105 
1106   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1107           Subtarget.hasStdExtZbkb()) &&
1108          !isa<ConstantSDNode>(Y);
1109 }
1110 
1111 bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
1112   // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
1113   auto *C = dyn_cast<ConstantSDNode>(Y);
1114   return C && C->getAPIntValue().ule(10);
1115 }
1116 
1117 /// Check if sinking \p I's operands to I's basic block is profitable, because
1118 /// the operands can be folded into a target instruction, e.g.
1119 /// splats of scalars can fold into vector instructions.
1120 bool RISCVTargetLowering::shouldSinkOperands(
1121     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1122   using namespace llvm::PatternMatch;
1123 
1124   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1125     return false;
1126 
1127   auto IsSinker = [&](Instruction *I, int Operand) {
1128     switch (I->getOpcode()) {
1129     case Instruction::Add:
1130     case Instruction::Sub:
1131     case Instruction::Mul:
1132     case Instruction::And:
1133     case Instruction::Or:
1134     case Instruction::Xor:
1135     case Instruction::FAdd:
1136     case Instruction::FSub:
1137     case Instruction::FMul:
1138     case Instruction::FDiv:
1139     case Instruction::ICmp:
1140     case Instruction::FCmp:
1141       return true;
1142     case Instruction::Shl:
1143     case Instruction::LShr:
1144     case Instruction::AShr:
1145     case Instruction::UDiv:
1146     case Instruction::SDiv:
1147     case Instruction::URem:
1148     case Instruction::SRem:
1149       return Operand == 1;
1150     case Instruction::Call:
1151       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1152         switch (II->getIntrinsicID()) {
1153         case Intrinsic::fma:
1154         case Intrinsic::vp_fma:
1155           return Operand == 0 || Operand == 1;
1156         // FIXME: Our patterns can only match vx/vf instructions when the splat
1157         // it on the RHS, because TableGen doesn't recognize our VP operations
1158         // as commutative.
1159         case Intrinsic::vp_add:
1160         case Intrinsic::vp_mul:
1161         case Intrinsic::vp_and:
1162         case Intrinsic::vp_or:
1163         case Intrinsic::vp_xor:
1164         case Intrinsic::vp_fadd:
1165         case Intrinsic::vp_fmul:
1166         case Intrinsic::vp_shl:
1167         case Intrinsic::vp_lshr:
1168         case Intrinsic::vp_ashr:
1169         case Intrinsic::vp_udiv:
1170         case Intrinsic::vp_sdiv:
1171         case Intrinsic::vp_urem:
1172         case Intrinsic::vp_srem:
1173           return Operand == 1;
1174         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1175         // explicit patterns for both LHS and RHS (as 'vr' versions).
1176         case Intrinsic::vp_sub:
1177         case Intrinsic::vp_fsub:
1178         case Intrinsic::vp_fdiv:
1179           return Operand == 0 || Operand == 1;
1180         default:
1181           return false;
1182         }
1183       }
1184       return false;
1185     default:
1186       return false;
1187     }
1188   };
1189 
1190   for (auto OpIdx : enumerate(I->operands())) {
1191     if (!IsSinker(I, OpIdx.index()))
1192       continue;
1193 
1194     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1195     // Make sure we are not already sinking this operand
1196     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1197       continue;
1198 
1199     // We are looking for a splat that can be sunk.
1200     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1201                              m_Undef(), m_ZeroMask())))
1202       continue;
1203 
1204     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1205     // and vector registers
1206     for (Use &U : Op->uses()) {
1207       Instruction *Insn = cast<Instruction>(U.getUser());
1208       if (!IsSinker(Insn, U.getOperandNo()))
1209         return false;
1210     }
1211 
1212     Ops.push_back(&Op->getOperandUse(0));
1213     Ops.push_back(&OpIdx.value());
1214   }
1215   return true;
1216 }
1217 
1218 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1219                                        bool ForCodeSize) const {
1220   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1221   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1222     return false;
1223   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1224     return false;
1225   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1226     return false;
1227   return Imm.isZero();
1228 }
1229 
1230 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1231   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1232          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1233          (VT == MVT::f64 && Subtarget.hasStdExtD());
1234 }
1235 
1236 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1237                                                       CallingConv::ID CC,
1238                                                       EVT VT) const {
1239   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1240   // We might still end up using a GPR but that will be decided based on ABI.
1241   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1242   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1243     return MVT::f32;
1244 
1245   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1246 }
1247 
1248 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1249                                                            CallingConv::ID CC,
1250                                                            EVT VT) const {
1251   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1252   // We might still end up using a GPR but that will be decided based on ABI.
1253   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1254   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1255     return 1;
1256 
1257   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1258 }
1259 
1260 // Changes the condition code and swaps operands if necessary, so the SetCC
1261 // operation matches one of the comparisons supported directly by branches
1262 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1263 // with 1/-1.
1264 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1265                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1266   // Convert X > -1 to X >= 0.
1267   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1268     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1269     CC = ISD::SETGE;
1270     return;
1271   }
1272   // Convert X < 1 to 0 >= X.
1273   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1274     RHS = LHS;
1275     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1276     CC = ISD::SETGE;
1277     return;
1278   }
1279 
1280   switch (CC) {
1281   default:
1282     break;
1283   case ISD::SETGT:
1284   case ISD::SETLE:
1285   case ISD::SETUGT:
1286   case ISD::SETULE:
1287     CC = ISD::getSetCCSwappedOperands(CC);
1288     std::swap(LHS, RHS);
1289     break;
1290   }
1291 }
1292 
1293 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1294   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1295   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1296   if (VT.getVectorElementType() == MVT::i1)
1297     KnownSize *= 8;
1298 
1299   switch (KnownSize) {
1300   default:
1301     llvm_unreachable("Invalid LMUL.");
1302   case 8:
1303     return RISCVII::VLMUL::LMUL_F8;
1304   case 16:
1305     return RISCVII::VLMUL::LMUL_F4;
1306   case 32:
1307     return RISCVII::VLMUL::LMUL_F2;
1308   case 64:
1309     return RISCVII::VLMUL::LMUL_1;
1310   case 128:
1311     return RISCVII::VLMUL::LMUL_2;
1312   case 256:
1313     return RISCVII::VLMUL::LMUL_4;
1314   case 512:
1315     return RISCVII::VLMUL::LMUL_8;
1316   }
1317 }
1318 
1319 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1320   switch (LMul) {
1321   default:
1322     llvm_unreachable("Invalid LMUL.");
1323   case RISCVII::VLMUL::LMUL_F8:
1324   case RISCVII::VLMUL::LMUL_F4:
1325   case RISCVII::VLMUL::LMUL_F2:
1326   case RISCVII::VLMUL::LMUL_1:
1327     return RISCV::VRRegClassID;
1328   case RISCVII::VLMUL::LMUL_2:
1329     return RISCV::VRM2RegClassID;
1330   case RISCVII::VLMUL::LMUL_4:
1331     return RISCV::VRM4RegClassID;
1332   case RISCVII::VLMUL::LMUL_8:
1333     return RISCV::VRM8RegClassID;
1334   }
1335 }
1336 
1337 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1338   RISCVII::VLMUL LMUL = getLMUL(VT);
1339   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1340       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1341       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1342       LMUL == RISCVII::VLMUL::LMUL_1) {
1343     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1344                   "Unexpected subreg numbering");
1345     return RISCV::sub_vrm1_0 + Index;
1346   }
1347   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1348     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1349                   "Unexpected subreg numbering");
1350     return RISCV::sub_vrm2_0 + Index;
1351   }
1352   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1353     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1354                   "Unexpected subreg numbering");
1355     return RISCV::sub_vrm4_0 + Index;
1356   }
1357   llvm_unreachable("Invalid vector type.");
1358 }
1359 
1360 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1361   if (VT.getVectorElementType() == MVT::i1)
1362     return RISCV::VRRegClassID;
1363   return getRegClassIDForLMUL(getLMUL(VT));
1364 }
1365 
1366 // Attempt to decompose a subvector insert/extract between VecVT and
1367 // SubVecVT via subregister indices. Returns the subregister index that
1368 // can perform the subvector insert/extract with the given element index, as
1369 // well as the index corresponding to any leftover subvectors that must be
1370 // further inserted/extracted within the register class for SubVecVT.
1371 std::pair<unsigned, unsigned>
1372 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1373     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1374     const RISCVRegisterInfo *TRI) {
1375   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1376                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1377                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1378                 "Register classes not ordered");
1379   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1380   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1381   // Try to compose a subregister index that takes us from the incoming
1382   // LMUL>1 register class down to the outgoing one. At each step we half
1383   // the LMUL:
1384   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1385   // Note that this is not guaranteed to find a subregister index, such as
1386   // when we are extracting from one VR type to another.
1387   unsigned SubRegIdx = RISCV::NoSubRegister;
1388   for (const unsigned RCID :
1389        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1390     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1391       VecVT = VecVT.getHalfNumVectorElementsVT();
1392       bool IsHi =
1393           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1394       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1395                                             getSubregIndexByMVT(VecVT, IsHi));
1396       if (IsHi)
1397         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1398     }
1399   return {SubRegIdx, InsertExtractIdx};
1400 }
1401 
1402 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1403 // stores for those types.
1404 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1405   return !Subtarget.useRVVForFixedLengthVectors() ||
1406          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1407 }
1408 
1409 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1410   if (ScalarTy->isPointerTy())
1411     return true;
1412 
1413   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1414       ScalarTy->isIntegerTy(32))
1415     return true;
1416 
1417   if (ScalarTy->isIntegerTy(64))
1418     return Subtarget.hasVInstructionsI64();
1419 
1420   if (ScalarTy->isHalfTy())
1421     return Subtarget.hasVInstructionsF16();
1422   if (ScalarTy->isFloatTy())
1423     return Subtarget.hasVInstructionsF32();
1424   if (ScalarTy->isDoubleTy())
1425     return Subtarget.hasVInstructionsF64();
1426 
1427   return false;
1428 }
1429 
1430 static SDValue getVLOperand(SDValue Op) {
1431   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1432           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1433          "Unexpected opcode");
1434   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1435   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1436   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1437       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1438   if (!II)
1439     return SDValue();
1440   return Op.getOperand(II->VLOperand + 1 + HasChain);
1441 }
1442 
1443 static bool useRVVForFixedLengthVectorVT(MVT VT,
1444                                          const RISCVSubtarget &Subtarget) {
1445   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1446   if (!Subtarget.useRVVForFixedLengthVectors())
1447     return false;
1448 
1449   // We only support a set of vector types with a consistent maximum fixed size
1450   // across all supported vector element types to avoid legalization issues.
1451   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1452   // fixed-length vector type we support is 1024 bytes.
1453   if (VT.getFixedSizeInBits() > 1024 * 8)
1454     return false;
1455 
1456   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1457 
1458   MVT EltVT = VT.getVectorElementType();
1459 
1460   // Don't use RVV for vectors we cannot scalarize if required.
1461   switch (EltVT.SimpleTy) {
1462   // i1 is supported but has different rules.
1463   default:
1464     return false;
1465   case MVT::i1:
1466     // Masks can only use a single register.
1467     if (VT.getVectorNumElements() > MinVLen)
1468       return false;
1469     MinVLen /= 8;
1470     break;
1471   case MVT::i8:
1472   case MVT::i16:
1473   case MVT::i32:
1474     break;
1475   case MVT::i64:
1476     if (!Subtarget.hasVInstructionsI64())
1477       return false;
1478     break;
1479   case MVT::f16:
1480     if (!Subtarget.hasVInstructionsF16())
1481       return false;
1482     break;
1483   case MVT::f32:
1484     if (!Subtarget.hasVInstructionsF32())
1485       return false;
1486     break;
1487   case MVT::f64:
1488     if (!Subtarget.hasVInstructionsF64())
1489       return false;
1490     break;
1491   }
1492 
1493   // Reject elements larger than ELEN.
1494   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1495     return false;
1496 
1497   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1498   // Don't use RVV for types that don't fit.
1499   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1500     return false;
1501 
1502   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1503   // the base fixed length RVV support in place.
1504   if (!VT.isPow2VectorType())
1505     return false;
1506 
1507   return true;
1508 }
1509 
1510 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1511   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1512 }
1513 
1514 // Return the largest legal scalable vector type that matches VT's element type.
1515 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1516                                             const RISCVSubtarget &Subtarget) {
1517   // This may be called before legal types are setup.
1518   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1519           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1520          "Expected legal fixed length vector!");
1521 
1522   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1523   unsigned MaxELen = Subtarget.getELEN();
1524 
1525   MVT EltVT = VT.getVectorElementType();
1526   switch (EltVT.SimpleTy) {
1527   default:
1528     llvm_unreachable("unexpected element type for RVV container");
1529   case MVT::i1:
1530   case MVT::i8:
1531   case MVT::i16:
1532   case MVT::i32:
1533   case MVT::i64:
1534   case MVT::f16:
1535   case MVT::f32:
1536   case MVT::f64: {
1537     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1538     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1539     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1540     unsigned NumElts =
1541         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1542     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1543     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1544     return MVT::getScalableVectorVT(EltVT, NumElts);
1545   }
1546   }
1547 }
1548 
1549 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1550                                             const RISCVSubtarget &Subtarget) {
1551   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1552                                           Subtarget);
1553 }
1554 
1555 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1556   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1557 }
1558 
1559 // Grow V to consume an entire RVV register.
1560 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1561                                        const RISCVSubtarget &Subtarget) {
1562   assert(VT.isScalableVector() &&
1563          "Expected to convert into a scalable vector!");
1564   assert(V.getValueType().isFixedLengthVector() &&
1565          "Expected a fixed length vector operand!");
1566   SDLoc DL(V);
1567   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1568   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1569 }
1570 
1571 // Shrink V so it's just big enough to maintain a VT's worth of data.
1572 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1573                                          const RISCVSubtarget &Subtarget) {
1574   assert(VT.isFixedLengthVector() &&
1575          "Expected to convert into a fixed length vector!");
1576   assert(V.getValueType().isScalableVector() &&
1577          "Expected a scalable vector operand!");
1578   SDLoc DL(V);
1579   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1580   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1581 }
1582 
1583 /// Return the type of the mask type suitable for masking the provided
1584 /// vector type.  This is simply an i1 element type vector of the same
1585 /// (possibly scalable) length.
1586 static MVT getMaskTypeFor(EVT VecVT) {
1587   assert(VecVT.isVector());
1588   ElementCount EC = VecVT.getVectorElementCount();
1589   return MVT::getVectorVT(MVT::i1, EC);
1590 }
1591 
1592 /// Creates an all ones mask suitable for masking a vector of type VecTy with
1593 /// vector length VL.  .
1594 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
1595                               SelectionDAG &DAG) {
1596   MVT MaskVT = getMaskTypeFor(VecVT);
1597   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1598 }
1599 
1600 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1601 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1602 // the vector type that it is contained in.
1603 static std::pair<SDValue, SDValue>
1604 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1605                 const RISCVSubtarget &Subtarget) {
1606   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1607   MVT XLenVT = Subtarget.getXLenVT();
1608   SDValue VL = VecVT.isFixedLengthVector()
1609                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1610                    : DAG.getRegister(RISCV::X0, XLenVT);
1611   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
1612   return {Mask, VL};
1613 }
1614 
1615 // As above but assuming the given type is a scalable vector type.
1616 static std::pair<SDValue, SDValue>
1617 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1618                         const RISCVSubtarget &Subtarget) {
1619   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1620   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1621 }
1622 
1623 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1624 // of either is (currently) supported. This can get us into an infinite loop
1625 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1626 // as a ..., etc.
1627 // Until either (or both) of these can reliably lower any node, reporting that
1628 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1629 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1630 // which is not desirable.
1631 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1632     EVT VT, unsigned DefinedValues) const {
1633   return false;
1634 }
1635 
1636 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1637                                   const RISCVSubtarget &Subtarget) {
1638   // RISCV FP-to-int conversions saturate to the destination register size, but
1639   // don't produce 0 for nan. We can use a conversion instruction and fix the
1640   // nan case with a compare and a select.
1641   SDValue Src = Op.getOperand(0);
1642 
1643   EVT DstVT = Op.getValueType();
1644   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1645 
1646   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1647   unsigned Opc;
1648   if (SatVT == DstVT)
1649     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1650   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1651     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1652   else
1653     return SDValue();
1654   // FIXME: Support other SatVTs by clamping before or after the conversion.
1655 
1656   SDLoc DL(Op);
1657   SDValue FpToInt = DAG.getNode(
1658       Opc, DL, DstVT, Src,
1659       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1660 
1661   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1662   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1663 }
1664 
1665 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1666 // and back. Taking care to avoid converting values that are nan or already
1667 // correct.
1668 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1669 // have FRM dependencies modeled yet.
1670 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1671   MVT VT = Op.getSimpleValueType();
1672   assert(VT.isVector() && "Unexpected type");
1673 
1674   SDLoc DL(Op);
1675 
1676   // Freeze the source since we are increasing the number of uses.
1677   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1678 
1679   // Truncate to integer and convert back to FP.
1680   MVT IntVT = VT.changeVectorElementTypeToInteger();
1681   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1682   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1683 
1684   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1685 
1686   if (Op.getOpcode() == ISD::FCEIL) {
1687     // If the truncated value is the greater than or equal to the original
1688     // value, we've computed the ceil. Otherwise, we went the wrong way and
1689     // need to increase by 1.
1690     // FIXME: This should use a masked operation. Handle here or in isel?
1691     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1692                                  DAG.getConstantFP(1.0, DL, VT));
1693     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1694     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1695   } else if (Op.getOpcode() == ISD::FFLOOR) {
1696     // If the truncated value is the less than or equal to the original value,
1697     // we've computed the floor. Otherwise, we went the wrong way and need to
1698     // decrease by 1.
1699     // FIXME: This should use a masked operation. Handle here or in isel?
1700     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1701                                  DAG.getConstantFP(1.0, DL, VT));
1702     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1703     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1704   }
1705 
1706   // Restore the original sign so that -0.0 is preserved.
1707   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1708 
1709   // Determine the largest integer that can be represented exactly. This and
1710   // values larger than it don't have any fractional bits so don't need to
1711   // be converted.
1712   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1713   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1714   APFloat MaxVal = APFloat(FltSem);
1715   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1716                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1717   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1718 
1719   // If abs(Src) was larger than MaxVal or nan, keep it.
1720   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1721   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1722   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1723 }
1724 
1725 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1726 // This mode isn't supported in vector hardware on RISCV. But as long as we
1727 // aren't compiling with trapping math, we can emulate this with
1728 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1729 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1730 // dependencies modeled yet.
1731 // FIXME: Use masked operations to avoid final merge.
1732 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1733   MVT VT = Op.getSimpleValueType();
1734   assert(VT.isVector() && "Unexpected type");
1735 
1736   SDLoc DL(Op);
1737 
1738   // Freeze the source since we are increasing the number of uses.
1739   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1740 
1741   // We do the conversion on the absolute value and fix the sign at the end.
1742   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1743 
1744   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1745   bool Ignored;
1746   APFloat Point5Pred = APFloat(0.5f);
1747   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1748   Point5Pred.next(/*nextDown*/ true);
1749 
1750   // Add the adjustment.
1751   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1752                                DAG.getConstantFP(Point5Pred, DL, VT));
1753 
1754   // Truncate to integer and convert back to fp.
1755   MVT IntVT = VT.changeVectorElementTypeToInteger();
1756   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1757   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1758 
1759   // Restore the original sign.
1760   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1761 
1762   // Determine the largest integer that can be represented exactly. This and
1763   // values larger than it don't have any fractional bits so don't need to
1764   // be converted.
1765   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1766   APFloat MaxVal = APFloat(FltSem);
1767   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1768                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1769   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1770 
1771   // If abs(Src) was larger than MaxVal or nan, keep it.
1772   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1773   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1774   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1775 }
1776 
1777 struct VIDSequence {
1778   int64_t StepNumerator;
1779   unsigned StepDenominator;
1780   int64_t Addend;
1781 };
1782 
1783 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1784 // to the (non-zero) step S and start value X. This can be then lowered as the
1785 // RVV sequence (VID * S) + X, for example.
1786 // The step S is represented as an integer numerator divided by a positive
1787 // denominator. Note that the implementation currently only identifies
1788 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1789 // cannot detect 2/3, for example.
1790 // Note that this method will also match potentially unappealing index
1791 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1792 // determine whether this is worth generating code for.
1793 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1794   unsigned NumElts = Op.getNumOperands();
1795   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1796   if (!Op.getValueType().isInteger())
1797     return None;
1798 
1799   Optional<unsigned> SeqStepDenom;
1800   Optional<int64_t> SeqStepNum, SeqAddend;
1801   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1802   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1803   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1804     // Assume undef elements match the sequence; we just have to be careful
1805     // when interpolating across them.
1806     if (Op.getOperand(Idx).isUndef())
1807       continue;
1808     // The BUILD_VECTOR must be all constants.
1809     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1810       return None;
1811 
1812     uint64_t Val = Op.getConstantOperandVal(Idx) &
1813                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1814 
1815     if (PrevElt) {
1816       // Calculate the step since the last non-undef element, and ensure
1817       // it's consistent across the entire sequence.
1818       unsigned IdxDiff = Idx - PrevElt->second;
1819       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1820 
1821       // A zero-value value difference means that we're somewhere in the middle
1822       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1823       // step change before evaluating the sequence.
1824       if (ValDiff == 0)
1825         continue;
1826 
1827       int64_t Remainder = ValDiff % IdxDiff;
1828       // Normalize the step if it's greater than 1.
1829       if (Remainder != ValDiff) {
1830         // The difference must cleanly divide the element span.
1831         if (Remainder != 0)
1832           return None;
1833         ValDiff /= IdxDiff;
1834         IdxDiff = 1;
1835       }
1836 
1837       if (!SeqStepNum)
1838         SeqStepNum = ValDiff;
1839       else if (ValDiff != SeqStepNum)
1840         return None;
1841 
1842       if (!SeqStepDenom)
1843         SeqStepDenom = IdxDiff;
1844       else if (IdxDiff != *SeqStepDenom)
1845         return None;
1846     }
1847 
1848     // Record this non-undef element for later.
1849     if (!PrevElt || PrevElt->first != Val)
1850       PrevElt = std::make_pair(Val, Idx);
1851   }
1852 
1853   // We need to have logged a step for this to count as a legal index sequence.
1854   if (!SeqStepNum || !SeqStepDenom)
1855     return None;
1856 
1857   // Loop back through the sequence and validate elements we might have skipped
1858   // while waiting for a valid step. While doing this, log any sequence addend.
1859   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1860     if (Op.getOperand(Idx).isUndef())
1861       continue;
1862     uint64_t Val = Op.getConstantOperandVal(Idx) &
1863                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1864     uint64_t ExpectedVal =
1865         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1866     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1867     if (!SeqAddend)
1868       SeqAddend = Addend;
1869     else if (Addend != SeqAddend)
1870       return None;
1871   }
1872 
1873   assert(SeqAddend && "Must have an addend if we have a step");
1874 
1875   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1876 }
1877 
1878 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1879 // and lower it as a VRGATHER_VX_VL from the source vector.
1880 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1881                                   SelectionDAG &DAG,
1882                                   const RISCVSubtarget &Subtarget) {
1883   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1884     return SDValue();
1885   SDValue Vec = SplatVal.getOperand(0);
1886   // Only perform this optimization on vectors of the same size for simplicity.
1887   if (Vec.getValueType() != VT)
1888     return SDValue();
1889   SDValue Idx = SplatVal.getOperand(1);
1890   // The index must be a legal type.
1891   if (Idx.getValueType() != Subtarget.getXLenVT())
1892     return SDValue();
1893 
1894   MVT ContainerVT = VT;
1895   if (VT.isFixedLengthVector()) {
1896     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1897     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
1898   }
1899 
1900   SDValue Mask, VL;
1901   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1902 
1903   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
1904                                Idx, Mask, VL);
1905 
1906   if (!VT.isFixedLengthVector())
1907     return Gather;
1908 
1909   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1910 }
1911 
1912 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1913                                  const RISCVSubtarget &Subtarget) {
1914   MVT VT = Op.getSimpleValueType();
1915   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1916 
1917   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1918 
1919   SDLoc DL(Op);
1920   SDValue Mask, VL;
1921   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1922 
1923   MVT XLenVT = Subtarget.getXLenVT();
1924   unsigned NumElts = Op.getNumOperands();
1925 
1926   if (VT.getVectorElementType() == MVT::i1) {
1927     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1928       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1929       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1930     }
1931 
1932     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1933       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1934       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1935     }
1936 
1937     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1938     // scalar integer chunks whose bit-width depends on the number of mask
1939     // bits and XLEN.
1940     // First, determine the most appropriate scalar integer type to use. This
1941     // is at most XLenVT, but may be shrunk to a smaller vector element type
1942     // according to the size of the final vector - use i8 chunks rather than
1943     // XLenVT if we're producing a v8i1. This results in more consistent
1944     // codegen across RV32 and RV64.
1945     unsigned NumViaIntegerBits =
1946         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1947     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
1948     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1949       // If we have to use more than one INSERT_VECTOR_ELT then this
1950       // optimization is likely to increase code size; avoid peforming it in
1951       // such a case. We can use a load from a constant pool in this case.
1952       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1953         return SDValue();
1954       // Now we can create our integer vector type. Note that it may be larger
1955       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1956       MVT IntegerViaVecVT =
1957           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1958                            divideCeil(NumElts, NumViaIntegerBits));
1959 
1960       uint64_t Bits = 0;
1961       unsigned BitPos = 0, IntegerEltIdx = 0;
1962       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1963 
1964       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1965         // Once we accumulate enough bits to fill our scalar type, insert into
1966         // our vector and clear our accumulated data.
1967         if (I != 0 && I % NumViaIntegerBits == 0) {
1968           if (NumViaIntegerBits <= 32)
1969             Bits = SignExtend64(Bits, 32);
1970           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1971           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1972                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1973           Bits = 0;
1974           BitPos = 0;
1975           IntegerEltIdx++;
1976         }
1977         SDValue V = Op.getOperand(I);
1978         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1979         Bits |= ((uint64_t)BitValue << BitPos);
1980       }
1981 
1982       // Insert the (remaining) scalar value into position in our integer
1983       // vector type.
1984       if (NumViaIntegerBits <= 32)
1985         Bits = SignExtend64(Bits, 32);
1986       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1987       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1988                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1989 
1990       if (NumElts < NumViaIntegerBits) {
1991         // If we're producing a smaller vector than our minimum legal integer
1992         // type, bitcast to the equivalent (known-legal) mask type, and extract
1993         // our final mask.
1994         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1995         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1996         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1997                           DAG.getConstant(0, DL, XLenVT));
1998       } else {
1999         // Else we must have produced an integer type with the same size as the
2000         // mask type; bitcast for the final result.
2001         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2002         Vec = DAG.getBitcast(VT, Vec);
2003       }
2004 
2005       return Vec;
2006     }
2007 
2008     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2009     // vector type, we have a legal equivalently-sized i8 type, so we can use
2010     // that.
2011     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2012     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2013 
2014     SDValue WideVec;
2015     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2016       // For a splat, perform a scalar truncate before creating the wider
2017       // vector.
2018       assert(Splat.getValueType() == XLenVT &&
2019              "Unexpected type for i1 splat value");
2020       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2021                           DAG.getConstant(1, DL, XLenVT));
2022       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2023     } else {
2024       SmallVector<SDValue, 8> Ops(Op->op_values());
2025       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2026       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2027       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2028     }
2029 
2030     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2031   }
2032 
2033   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2034     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2035       return Gather;
2036     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2037                                         : RISCVISD::VMV_V_X_VL;
2038     Splat =
2039         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2040     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2041   }
2042 
2043   // Try and match index sequences, which we can lower to the vid instruction
2044   // with optional modifications. An all-undef vector is matched by
2045   // getSplatValue, above.
2046   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2047     int64_t StepNumerator = SimpleVID->StepNumerator;
2048     unsigned StepDenominator = SimpleVID->StepDenominator;
2049     int64_t Addend = SimpleVID->Addend;
2050 
2051     assert(StepNumerator != 0 && "Invalid step");
2052     bool Negate = false;
2053     int64_t SplatStepVal = StepNumerator;
2054     unsigned StepOpcode = ISD::MUL;
2055     if (StepNumerator != 1) {
2056       if (isPowerOf2_64(std::abs(StepNumerator))) {
2057         Negate = StepNumerator < 0;
2058         StepOpcode = ISD::SHL;
2059         SplatStepVal = Log2_64(std::abs(StepNumerator));
2060       }
2061     }
2062 
2063     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2064     // threshold since it's the immediate value many RVV instructions accept.
2065     // There is no vmul.vi instruction so ensure multiply constant can fit in
2066     // a single addi instruction.
2067     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2068          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2069         isPowerOf2_32(StepDenominator) &&
2070         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
2071       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2072       // Convert right out of the scalable type so we can use standard ISD
2073       // nodes for the rest of the computation. If we used scalable types with
2074       // these, we'd lose the fixed-length vector info and generate worse
2075       // vsetvli code.
2076       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2077       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2078           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2079         SDValue SplatStep = DAG.getSplatBuildVector(
2080             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2081         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2082       }
2083       if (StepDenominator != 1) {
2084         SDValue SplatStep = DAG.getSplatBuildVector(
2085             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2086         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2087       }
2088       if (Addend != 0 || Negate) {
2089         SDValue SplatAddend = DAG.getSplatBuildVector(
2090             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2091         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2092       }
2093       return VID;
2094     }
2095   }
2096 
2097   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2098   // when re-interpreted as a vector with a larger element type. For example,
2099   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2100   // could be instead splat as
2101   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2102   // TODO: This optimization could also work on non-constant splats, but it
2103   // would require bit-manipulation instructions to construct the splat value.
2104   SmallVector<SDValue> Sequence;
2105   unsigned EltBitSize = VT.getScalarSizeInBits();
2106   const auto *BV = cast<BuildVectorSDNode>(Op);
2107   if (VT.isInteger() && EltBitSize < 64 &&
2108       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2109       BV->getRepeatedSequence(Sequence) &&
2110       (Sequence.size() * EltBitSize) <= 64) {
2111     unsigned SeqLen = Sequence.size();
2112     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2113     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2114     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2115             ViaIntVT == MVT::i64) &&
2116            "Unexpected sequence type");
2117 
2118     unsigned EltIdx = 0;
2119     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2120     uint64_t SplatValue = 0;
2121     // Construct the amalgamated value which can be splatted as this larger
2122     // vector type.
2123     for (const auto &SeqV : Sequence) {
2124       if (!SeqV.isUndef())
2125         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2126                        << (EltIdx * EltBitSize));
2127       EltIdx++;
2128     }
2129 
2130     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2131     // achieve better constant materializion.
2132     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2133       SplatValue = SignExtend64(SplatValue, 32);
2134 
2135     // Since we can't introduce illegal i64 types at this stage, we can only
2136     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2137     // way we can use RVV instructions to splat.
2138     assert((ViaIntVT.bitsLE(XLenVT) ||
2139             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2140            "Unexpected bitcast sequence");
2141     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2142       SDValue ViaVL =
2143           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2144       MVT ViaContainerVT =
2145           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2146       SDValue Splat =
2147           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2148                       DAG.getUNDEF(ViaContainerVT),
2149                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2150       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2151       return DAG.getBitcast(VT, Splat);
2152     }
2153   }
2154 
2155   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2156   // which constitute a large proportion of the elements. In such cases we can
2157   // splat a vector with the dominant element and make up the shortfall with
2158   // INSERT_VECTOR_ELTs.
2159   // Note that this includes vectors of 2 elements by association. The
2160   // upper-most element is the "dominant" one, allowing us to use a splat to
2161   // "insert" the upper element, and an insert of the lower element at position
2162   // 0, which improves codegen.
2163   SDValue DominantValue;
2164   unsigned MostCommonCount = 0;
2165   DenseMap<SDValue, unsigned> ValueCounts;
2166   unsigned NumUndefElts =
2167       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2168 
2169   // Track the number of scalar loads we know we'd be inserting, estimated as
2170   // any non-zero floating-point constant. Other kinds of element are either
2171   // already in registers or are materialized on demand. The threshold at which
2172   // a vector load is more desirable than several scalar materializion and
2173   // vector-insertion instructions is not known.
2174   unsigned NumScalarLoads = 0;
2175 
2176   for (SDValue V : Op->op_values()) {
2177     if (V.isUndef())
2178       continue;
2179 
2180     ValueCounts.insert(std::make_pair(V, 0));
2181     unsigned &Count = ValueCounts[V];
2182 
2183     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2184       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2185 
2186     // Is this value dominant? In case of a tie, prefer the highest element as
2187     // it's cheaper to insert near the beginning of a vector than it is at the
2188     // end.
2189     if (++Count >= MostCommonCount) {
2190       DominantValue = V;
2191       MostCommonCount = Count;
2192     }
2193   }
2194 
2195   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2196   unsigned NumDefElts = NumElts - NumUndefElts;
2197   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2198 
2199   // Don't perform this optimization when optimizing for size, since
2200   // materializing elements and inserting them tends to cause code bloat.
2201   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2202       ((MostCommonCount > DominantValueCountThreshold) ||
2203        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2204     // Start by splatting the most common element.
2205     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2206 
2207     DenseSet<SDValue> Processed{DominantValue};
2208     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2209     for (const auto &OpIdx : enumerate(Op->ops())) {
2210       const SDValue &V = OpIdx.value();
2211       if (V.isUndef() || !Processed.insert(V).second)
2212         continue;
2213       if (ValueCounts[V] == 1) {
2214         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2215                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2216       } else {
2217         // Blend in all instances of this value using a VSELECT, using a
2218         // mask where each bit signals whether that element is the one
2219         // we're after.
2220         SmallVector<SDValue> Ops;
2221         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2222           return DAG.getConstant(V == V1, DL, XLenVT);
2223         });
2224         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2225                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2226                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2227       }
2228     }
2229 
2230     return Vec;
2231   }
2232 
2233   return SDValue();
2234 }
2235 
2236 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2237                                    SDValue Lo, SDValue Hi, SDValue VL,
2238                                    SelectionDAG &DAG) {
2239   if (!Passthru)
2240     Passthru = DAG.getUNDEF(VT);
2241   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2242     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2243     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2244     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2245     // node in order to try and match RVV vector/scalar instructions.
2246     if ((LoC >> 31) == HiC)
2247       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2248 
2249     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2250     // vmv.v.x whose EEW = 32 to lower it.
2251     auto *Const = dyn_cast<ConstantSDNode>(VL);
2252     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2253       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2254       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2255       // access the subtarget here now.
2256       auto InterVec = DAG.getNode(
2257           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2258                                   DAG.getRegister(RISCV::X0, MVT::i32));
2259       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2260     }
2261   }
2262 
2263   // Fall back to a stack store and stride x0 vector load.
2264   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2265                      Hi, VL);
2266 }
2267 
2268 // Called by type legalization to handle splat of i64 on RV32.
2269 // FIXME: We can optimize this when the type has sign or zero bits in one
2270 // of the halves.
2271 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2272                                    SDValue Scalar, SDValue VL,
2273                                    SelectionDAG &DAG) {
2274   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2275   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2276                            DAG.getConstant(0, DL, MVT::i32));
2277   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2278                            DAG.getConstant(1, DL, MVT::i32));
2279   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2280 }
2281 
2282 // This function lowers a splat of a scalar operand Splat with the vector
2283 // length VL. It ensures the final sequence is type legal, which is useful when
2284 // lowering a splat after type legalization.
2285 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2286                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2287                                 const RISCVSubtarget &Subtarget) {
2288   bool HasPassthru = Passthru && !Passthru.isUndef();
2289   if (!HasPassthru && !Passthru)
2290     Passthru = DAG.getUNDEF(VT);
2291   if (VT.isFloatingPoint()) {
2292     // If VL is 1, we could use vfmv.s.f.
2293     if (isOneConstant(VL))
2294       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2295     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2296   }
2297 
2298   MVT XLenVT = Subtarget.getXLenVT();
2299 
2300   // Simplest case is that the operand needs to be promoted to XLenVT.
2301   if (Scalar.getValueType().bitsLE(XLenVT)) {
2302     // If the operand is a constant, sign extend to increase our chances
2303     // of being able to use a .vi instruction. ANY_EXTEND would become a
2304     // a zero extend and the simm5 check in isel would fail.
2305     // FIXME: Should we ignore the upper bits in isel instead?
2306     unsigned ExtOpc =
2307         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2308     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2309     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2310     // If VL is 1 and the scalar value won't benefit from immediate, we could
2311     // use vmv.s.x.
2312     if (isOneConstant(VL) &&
2313         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2314       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2315     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2316   }
2317 
2318   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2319          "Unexpected scalar for splat lowering!");
2320 
2321   if (isOneConstant(VL) && isNullConstant(Scalar))
2322     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2323                        DAG.getConstant(0, DL, XLenVT), VL);
2324 
2325   // Otherwise use the more complicated splatting algorithm.
2326   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2327 }
2328 
2329 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2330                                 const RISCVSubtarget &Subtarget) {
2331   // We need to be able to widen elements to the next larger integer type.
2332   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2333     return false;
2334 
2335   int Size = Mask.size();
2336   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2337 
2338   int Srcs[] = {-1, -1};
2339   for (int i = 0; i != Size; ++i) {
2340     // Ignore undef elements.
2341     if (Mask[i] < 0)
2342       continue;
2343 
2344     // Is this an even or odd element.
2345     int Pol = i % 2;
2346 
2347     // Ensure we consistently use the same source for this element polarity.
2348     int Src = Mask[i] / Size;
2349     if (Srcs[Pol] < 0)
2350       Srcs[Pol] = Src;
2351     if (Srcs[Pol] != Src)
2352       return false;
2353 
2354     // Make sure the element within the source is appropriate for this element
2355     // in the destination.
2356     int Elt = Mask[i] % Size;
2357     if (Elt != i / 2)
2358       return false;
2359   }
2360 
2361   // We need to find a source for each polarity and they can't be the same.
2362   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2363     return false;
2364 
2365   // Swap the sources if the second source was in the even polarity.
2366   SwapSources = Srcs[0] > Srcs[1];
2367 
2368   return true;
2369 }
2370 
2371 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2372 /// and then extract the original number of elements from the rotated result.
2373 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2374 /// returned rotation amount is for a rotate right, where elements move from
2375 /// higher elements to lower elements. \p LoSrc indicates the first source
2376 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2377 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2378 /// 0 or 1 if a rotation is found.
2379 ///
2380 /// NOTE: We talk about rotate to the right which matches how bit shift and
2381 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2382 /// and the table below write vectors with the lowest elements on the left.
2383 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2384   int Size = Mask.size();
2385 
2386   // We need to detect various ways of spelling a rotation:
2387   //   [11, 12, 13, 14, 15,  0,  1,  2]
2388   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2389   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2390   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2391   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2392   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2393   int Rotation = 0;
2394   LoSrc = -1;
2395   HiSrc = -1;
2396   for (int i = 0; i != Size; ++i) {
2397     int M = Mask[i];
2398     if (M < 0)
2399       continue;
2400 
2401     // Determine where a rotate vector would have started.
2402     int StartIdx = i - (M % Size);
2403     // The identity rotation isn't interesting, stop.
2404     if (StartIdx == 0)
2405       return -1;
2406 
2407     // If we found the tail of a vector the rotation must be the missing
2408     // front. If we found the head of a vector, it must be how much of the
2409     // head.
2410     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2411 
2412     if (Rotation == 0)
2413       Rotation = CandidateRotation;
2414     else if (Rotation != CandidateRotation)
2415       // The rotations don't match, so we can't match this mask.
2416       return -1;
2417 
2418     // Compute which value this mask is pointing at.
2419     int MaskSrc = M < Size ? 0 : 1;
2420 
2421     // Compute which of the two target values this index should be assigned to.
2422     // This reflects whether the high elements are remaining or the low elemnts
2423     // are remaining.
2424     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2425 
2426     // Either set up this value if we've not encountered it before, or check
2427     // that it remains consistent.
2428     if (TargetSrc < 0)
2429       TargetSrc = MaskSrc;
2430     else if (TargetSrc != MaskSrc)
2431       // This may be a rotation, but it pulls from the inputs in some
2432       // unsupported interleaving.
2433       return -1;
2434   }
2435 
2436   // Check that we successfully analyzed the mask, and normalize the results.
2437   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2438   assert((LoSrc >= 0 || HiSrc >= 0) &&
2439          "Failed to find a rotated input vector!");
2440 
2441   return Rotation;
2442 }
2443 
2444 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2445                                    const RISCVSubtarget &Subtarget) {
2446   SDValue V1 = Op.getOperand(0);
2447   SDValue V2 = Op.getOperand(1);
2448   SDLoc DL(Op);
2449   MVT XLenVT = Subtarget.getXLenVT();
2450   MVT VT = Op.getSimpleValueType();
2451   unsigned NumElts = VT.getVectorNumElements();
2452   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2453 
2454   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2455 
2456   SDValue TrueMask, VL;
2457   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2458 
2459   if (SVN->isSplat()) {
2460     const int Lane = SVN->getSplatIndex();
2461     if (Lane >= 0) {
2462       MVT SVT = VT.getVectorElementType();
2463 
2464       // Turn splatted vector load into a strided load with an X0 stride.
2465       SDValue V = V1;
2466       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2467       // with undef.
2468       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2469       int Offset = Lane;
2470       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2471         int OpElements =
2472             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2473         V = V.getOperand(Offset / OpElements);
2474         Offset %= OpElements;
2475       }
2476 
2477       // We need to ensure the load isn't atomic or volatile.
2478       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2479         auto *Ld = cast<LoadSDNode>(V);
2480         Offset *= SVT.getStoreSize();
2481         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2482                                                    TypeSize::Fixed(Offset), DL);
2483 
2484         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2485         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2486           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2487           SDValue IntID =
2488               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2489           SDValue Ops[] = {Ld->getChain(),
2490                            IntID,
2491                            DAG.getUNDEF(ContainerVT),
2492                            NewAddr,
2493                            DAG.getRegister(RISCV::X0, XLenVT),
2494                            VL};
2495           SDValue NewLoad = DAG.getMemIntrinsicNode(
2496               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2497               DAG.getMachineFunction().getMachineMemOperand(
2498                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2499           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2500           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2501         }
2502 
2503         // Otherwise use a scalar load and splat. This will give the best
2504         // opportunity to fold a splat into the operation. ISel can turn it into
2505         // the x0 strided load if we aren't able to fold away the select.
2506         if (SVT.isFloatingPoint())
2507           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2508                           Ld->getPointerInfo().getWithOffset(Offset),
2509                           Ld->getOriginalAlign(),
2510                           Ld->getMemOperand()->getFlags());
2511         else
2512           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2513                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2514                              Ld->getOriginalAlign(),
2515                              Ld->getMemOperand()->getFlags());
2516         DAG.makeEquivalentMemoryOrdering(Ld, V);
2517 
2518         unsigned Opc =
2519             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2520         SDValue Splat =
2521             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2522         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2523       }
2524 
2525       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2526       assert(Lane < (int)NumElts && "Unexpected lane!");
2527       SDValue Gather =
2528           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2529                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2530       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2531     }
2532   }
2533 
2534   ArrayRef<int> Mask = SVN->getMask();
2535 
2536   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2537   // be undef which can be handled with a single SLIDEDOWN/UP.
2538   int LoSrc, HiSrc;
2539   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2540   if (Rotation > 0) {
2541     SDValue LoV, HiV;
2542     if (LoSrc >= 0) {
2543       LoV = LoSrc == 0 ? V1 : V2;
2544       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2545     }
2546     if (HiSrc >= 0) {
2547       HiV = HiSrc == 0 ? V1 : V2;
2548       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2549     }
2550 
2551     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2552     // to slide LoV up by (NumElts - Rotation).
2553     unsigned InvRotate = NumElts - Rotation;
2554 
2555     SDValue Res = DAG.getUNDEF(ContainerVT);
2556     if (HiV) {
2557       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2558       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2559       // causes multiple vsetvlis in some test cases such as lowering
2560       // reduce.mul
2561       SDValue DownVL = VL;
2562       if (LoV)
2563         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2564       Res =
2565           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2566                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2567     }
2568     if (LoV)
2569       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2570                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2571 
2572     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2573   }
2574 
2575   // Detect an interleave shuffle and lower to
2576   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2577   bool SwapSources;
2578   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2579     // Swap sources if needed.
2580     if (SwapSources)
2581       std::swap(V1, V2);
2582 
2583     // Extract the lower half of the vectors.
2584     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2585     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2586                      DAG.getConstant(0, DL, XLenVT));
2587     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2588                      DAG.getConstant(0, DL, XLenVT));
2589 
2590     // Double the element width and halve the number of elements in an int type.
2591     unsigned EltBits = VT.getScalarSizeInBits();
2592     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2593     MVT WideIntVT =
2594         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2595     // Convert this to a scalable vector. We need to base this on the
2596     // destination size to ensure there's always a type with a smaller LMUL.
2597     MVT WideIntContainerVT =
2598         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2599 
2600     // Convert sources to scalable vectors with the same element count as the
2601     // larger type.
2602     MVT HalfContainerVT = MVT::getVectorVT(
2603         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2604     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2605     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2606 
2607     // Cast sources to integer.
2608     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2609     MVT IntHalfVT =
2610         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2611     V1 = DAG.getBitcast(IntHalfVT, V1);
2612     V2 = DAG.getBitcast(IntHalfVT, V2);
2613 
2614     // Freeze V2 since we use it twice and we need to be sure that the add and
2615     // multiply see the same value.
2616     V2 = DAG.getFreeze(V2);
2617 
2618     // Recreate TrueMask using the widened type's element count.
2619     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2620 
2621     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2622     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2623                               V2, TrueMask, VL);
2624     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2625     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2626                                      DAG.getUNDEF(IntHalfVT),
2627                                      DAG.getAllOnesConstant(DL, XLenVT));
2628     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2629                                    V2, Multiplier, TrueMask, VL);
2630     // Add the new copies to our previous addition giving us 2^eltbits copies of
2631     // V2. This is equivalent to shifting V2 left by eltbits. This should
2632     // combine with the vwmulu.vv above to form vwmaccu.vv.
2633     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2634                       TrueMask, VL);
2635     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2636     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2637     // vector VT.
2638     ContainerVT =
2639         MVT::getVectorVT(VT.getVectorElementType(),
2640                          WideIntContainerVT.getVectorElementCount() * 2);
2641     Add = DAG.getBitcast(ContainerVT, Add);
2642     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2643   }
2644 
2645   // Detect shuffles which can be re-expressed as vector selects; these are
2646   // shuffles in which each element in the destination is taken from an element
2647   // at the corresponding index in either source vectors.
2648   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2649     int MaskIndex = MaskIdx.value();
2650     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2651   });
2652 
2653   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2654 
2655   SmallVector<SDValue> MaskVals;
2656   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2657   // merged with a second vrgather.
2658   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2659 
2660   // By default we preserve the original operand order, and use a mask to
2661   // select LHS as true and RHS as false. However, since RVV vector selects may
2662   // feature splats but only on the LHS, we may choose to invert our mask and
2663   // instead select between RHS and LHS.
2664   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2665   bool InvertMask = IsSelect == SwapOps;
2666 
2667   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2668   // half.
2669   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2670 
2671   // Now construct the mask that will be used by the vselect or blended
2672   // vrgather operation. For vrgathers, construct the appropriate indices into
2673   // each vector.
2674   for (int MaskIndex : Mask) {
2675     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2676     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2677     if (!IsSelect) {
2678       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2679       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2680                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2681                                      : DAG.getUNDEF(XLenVT));
2682       GatherIndicesRHS.push_back(
2683           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2684                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2685       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2686         ++LHSIndexCounts[MaskIndex];
2687       if (!IsLHSOrUndefIndex)
2688         ++RHSIndexCounts[MaskIndex - NumElts];
2689     }
2690   }
2691 
2692   if (SwapOps) {
2693     std::swap(V1, V2);
2694     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2695   }
2696 
2697   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2698   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2699   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2700 
2701   if (IsSelect)
2702     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2703 
2704   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2705     // On such a large vector we're unable to use i8 as the index type.
2706     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2707     // may involve vector splitting if we're already at LMUL=8, or our
2708     // user-supplied maximum fixed-length LMUL.
2709     return SDValue();
2710   }
2711 
2712   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2713   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2714   MVT IndexVT = VT.changeTypeToInteger();
2715   // Since we can't introduce illegal index types at this stage, use i16 and
2716   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2717   // than XLenVT.
2718   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2719     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2720     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2721   }
2722 
2723   MVT IndexContainerVT =
2724       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2725 
2726   SDValue Gather;
2727   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2728   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2729   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2730     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2731                               Subtarget);
2732   } else {
2733     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2734     // If only one index is used, we can use a "splat" vrgather.
2735     // TODO: We can splat the most-common index and fix-up any stragglers, if
2736     // that's beneficial.
2737     if (LHSIndexCounts.size() == 1) {
2738       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2739       Gather =
2740           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2741                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2742     } else {
2743       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2744       LHSIndices =
2745           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2746 
2747       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2748                            TrueMask, VL);
2749     }
2750   }
2751 
2752   // If a second vector operand is used by this shuffle, blend it in with an
2753   // additional vrgather.
2754   if (!V2.isUndef()) {
2755     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2756     // If only one index is used, we can use a "splat" vrgather.
2757     // TODO: We can splat the most-common index and fix-up any stragglers, if
2758     // that's beneficial.
2759     if (RHSIndexCounts.size() == 1) {
2760       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2761       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2762                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2763     } else {
2764       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2765       RHSIndices =
2766           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2767       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2768                        VL);
2769     }
2770 
2771     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2772     SelectMask =
2773         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2774 
2775     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2776                          Gather, VL);
2777   }
2778 
2779   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2780 }
2781 
2782 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2783   // Support splats for any type. These should type legalize well.
2784   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2785     return true;
2786 
2787   // Only support legal VTs for other shuffles for now.
2788   if (!isTypeLegal(VT))
2789     return false;
2790 
2791   MVT SVT = VT.getSimpleVT();
2792 
2793   bool SwapSources;
2794   int LoSrc, HiSrc;
2795   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2796          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2797 }
2798 
2799 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2800 // the exponent.
2801 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2802   MVT VT = Op.getSimpleValueType();
2803   unsigned EltSize = VT.getScalarSizeInBits();
2804   SDValue Src = Op.getOperand(0);
2805   SDLoc DL(Op);
2806 
2807   // We need a FP type that can represent the value.
2808   // TODO: Use f16 for i8 when possible?
2809   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2810   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2811 
2812   // Legal types should have been checked in the RISCVTargetLowering
2813   // constructor.
2814   // TODO: Splitting may make sense in some cases.
2815   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2816          "Expected legal float type!");
2817 
2818   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2819   // The trailing zero count is equal to log2 of this single bit value.
2820   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2821     SDValue Neg =
2822         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2823     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2824   }
2825 
2826   // We have a legal FP type, convert to it.
2827   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2828   // Bitcast to integer and shift the exponent to the LSB.
2829   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2830   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2831   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2832   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2833                               DAG.getConstant(ShiftAmt, DL, IntVT));
2834   // Truncate back to original type to allow vnsrl.
2835   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2836   // The exponent contains log2 of the value in biased form.
2837   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2838 
2839   // For trailing zeros, we just need to subtract the bias.
2840   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2841     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2842                        DAG.getConstant(ExponentBias, DL, VT));
2843 
2844   // For leading zeros, we need to remove the bias and convert from log2 to
2845   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2846   unsigned Adjust = ExponentBias + (EltSize - 1);
2847   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2848 }
2849 
2850 // While RVV has alignment restrictions, we should always be able to load as a
2851 // legal equivalently-sized byte-typed vector instead. This method is
2852 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2853 // the load is already correctly-aligned, it returns SDValue().
2854 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2855                                                     SelectionDAG &DAG) const {
2856   auto *Load = cast<LoadSDNode>(Op);
2857   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2858 
2859   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2860                                      Load->getMemoryVT(),
2861                                      *Load->getMemOperand()))
2862     return SDValue();
2863 
2864   SDLoc DL(Op);
2865   MVT VT = Op.getSimpleValueType();
2866   unsigned EltSizeBits = VT.getScalarSizeInBits();
2867   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2868          "Unexpected unaligned RVV load type");
2869   MVT NewVT =
2870       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2871   assert(NewVT.isValid() &&
2872          "Expecting equally-sized RVV vector types to be legal");
2873   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2874                           Load->getPointerInfo(), Load->getOriginalAlign(),
2875                           Load->getMemOperand()->getFlags());
2876   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2877 }
2878 
2879 // While RVV has alignment restrictions, we should always be able to store as a
2880 // legal equivalently-sized byte-typed vector instead. This method is
2881 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2882 // returns SDValue() if the store is already correctly aligned.
2883 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2884                                                      SelectionDAG &DAG) const {
2885   auto *Store = cast<StoreSDNode>(Op);
2886   assert(Store && Store->getValue().getValueType().isVector() &&
2887          "Expected vector store");
2888 
2889   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2890                                      Store->getMemoryVT(),
2891                                      *Store->getMemOperand()))
2892     return SDValue();
2893 
2894   SDLoc DL(Op);
2895   SDValue StoredVal = Store->getValue();
2896   MVT VT = StoredVal.getSimpleValueType();
2897   unsigned EltSizeBits = VT.getScalarSizeInBits();
2898   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2899          "Unexpected unaligned RVV store type");
2900   MVT NewVT =
2901       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2902   assert(NewVT.isValid() &&
2903          "Expecting equally-sized RVV vector types to be legal");
2904   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2905   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2906                       Store->getPointerInfo(), Store->getOriginalAlign(),
2907                       Store->getMemOperand()->getFlags());
2908 }
2909 
2910 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2911                                             SelectionDAG &DAG) const {
2912   switch (Op.getOpcode()) {
2913   default:
2914     report_fatal_error("unimplemented operand");
2915   case ISD::GlobalAddress:
2916     return lowerGlobalAddress(Op, DAG);
2917   case ISD::BlockAddress:
2918     return lowerBlockAddress(Op, DAG);
2919   case ISD::ConstantPool:
2920     return lowerConstantPool(Op, DAG);
2921   case ISD::JumpTable:
2922     return lowerJumpTable(Op, DAG);
2923   case ISD::GlobalTLSAddress:
2924     return lowerGlobalTLSAddress(Op, DAG);
2925   case ISD::SELECT:
2926     return lowerSELECT(Op, DAG);
2927   case ISD::BRCOND:
2928     return lowerBRCOND(Op, DAG);
2929   case ISD::VASTART:
2930     return lowerVASTART(Op, DAG);
2931   case ISD::FRAMEADDR:
2932     return lowerFRAMEADDR(Op, DAG);
2933   case ISD::RETURNADDR:
2934     return lowerRETURNADDR(Op, DAG);
2935   case ISD::SHL_PARTS:
2936     return lowerShiftLeftParts(Op, DAG);
2937   case ISD::SRA_PARTS:
2938     return lowerShiftRightParts(Op, DAG, true);
2939   case ISD::SRL_PARTS:
2940     return lowerShiftRightParts(Op, DAG, false);
2941   case ISD::BITCAST: {
2942     SDLoc DL(Op);
2943     EVT VT = Op.getValueType();
2944     SDValue Op0 = Op.getOperand(0);
2945     EVT Op0VT = Op0.getValueType();
2946     MVT XLenVT = Subtarget.getXLenVT();
2947     if (VT.isFixedLengthVector()) {
2948       // We can handle fixed length vector bitcasts with a simple replacement
2949       // in isel.
2950       if (Op0VT.isFixedLengthVector())
2951         return Op;
2952       // When bitcasting from scalar to fixed-length vector, insert the scalar
2953       // into a one-element vector of the result type, and perform a vector
2954       // bitcast.
2955       if (!Op0VT.isVector()) {
2956         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2957         if (!isTypeLegal(BVT))
2958           return SDValue();
2959         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2960                                               DAG.getUNDEF(BVT), Op0,
2961                                               DAG.getConstant(0, DL, XLenVT)));
2962       }
2963       return SDValue();
2964     }
2965     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2966     // thus: bitcast the vector to a one-element vector type whose element type
2967     // is the same as the result type, and extract the first element.
2968     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2969       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2970       if (!isTypeLegal(BVT))
2971         return SDValue();
2972       SDValue BVec = DAG.getBitcast(BVT, Op0);
2973       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2974                          DAG.getConstant(0, DL, XLenVT));
2975     }
2976     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2977       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2978       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2979       return FPConv;
2980     }
2981     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2982         Subtarget.hasStdExtF()) {
2983       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2984       SDValue FPConv =
2985           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2986       return FPConv;
2987     }
2988     return SDValue();
2989   }
2990   case ISD::INTRINSIC_WO_CHAIN:
2991     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2992   case ISD::INTRINSIC_W_CHAIN:
2993     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2994   case ISD::INTRINSIC_VOID:
2995     return LowerINTRINSIC_VOID(Op, DAG);
2996   case ISD::BSWAP:
2997   case ISD::BITREVERSE: {
2998     MVT VT = Op.getSimpleValueType();
2999     SDLoc DL(Op);
3000     if (Subtarget.hasStdExtZbp()) {
3001       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3002       // Start with the maximum immediate value which is the bitwidth - 1.
3003       unsigned Imm = VT.getSizeInBits() - 1;
3004       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3005       if (Op.getOpcode() == ISD::BSWAP)
3006         Imm &= ~0x7U;
3007       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3008                          DAG.getConstant(Imm, DL, VT));
3009     }
3010     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3011     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3012     // Expand bitreverse to a bswap(rev8) followed by brev8.
3013     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3014     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3015     // as brev8 by an isel pattern.
3016     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3017                        DAG.getConstant(7, DL, VT));
3018   }
3019   case ISD::FSHL:
3020   case ISD::FSHR: {
3021     MVT VT = Op.getSimpleValueType();
3022     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3023     SDLoc DL(Op);
3024     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3025     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3026     // accidentally setting the extra bit.
3027     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3028     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3029                                 DAG.getConstant(ShAmtWidth, DL, VT));
3030     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3031     // instruction use different orders. fshl will return its first operand for
3032     // shift of zero, fshr will return its second operand. fsl and fsr both
3033     // return rs1 so the ISD nodes need to have different operand orders.
3034     // Shift amount is in rs2.
3035     SDValue Op0 = Op.getOperand(0);
3036     SDValue Op1 = Op.getOperand(1);
3037     unsigned Opc = RISCVISD::FSL;
3038     if (Op.getOpcode() == ISD::FSHR) {
3039       std::swap(Op0, Op1);
3040       Opc = RISCVISD::FSR;
3041     }
3042     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3043   }
3044   case ISD::TRUNCATE:
3045     // Only custom-lower vector truncates
3046     if (!Op.getSimpleValueType().isVector())
3047       return Op;
3048     return lowerVectorTruncLike(Op, DAG);
3049   case ISD::ANY_EXTEND:
3050   case ISD::ZERO_EXTEND:
3051     if (Op.getOperand(0).getValueType().isVector() &&
3052         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3053       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3054     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3055   case ISD::SIGN_EXTEND:
3056     if (Op.getOperand(0).getValueType().isVector() &&
3057         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3058       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3059     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3060   case ISD::SPLAT_VECTOR_PARTS:
3061     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3062   case ISD::INSERT_VECTOR_ELT:
3063     return lowerINSERT_VECTOR_ELT(Op, DAG);
3064   case ISD::EXTRACT_VECTOR_ELT:
3065     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3066   case ISD::VSCALE: {
3067     MVT VT = Op.getSimpleValueType();
3068     SDLoc DL(Op);
3069     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3070     // We define our scalable vector types for lmul=1 to use a 64 bit known
3071     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3072     // vscale as VLENB / 8.
3073     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3074     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3075       report_fatal_error("Support for VLEN==32 is incomplete.");
3076     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3077       // We assume VLENB is a multiple of 8. We manually choose the best shift
3078       // here because SimplifyDemandedBits isn't always able to simplify it.
3079       uint64_t Val = Op.getConstantOperandVal(0);
3080       if (isPowerOf2_64(Val)) {
3081         uint64_t Log2 = Log2_64(Val);
3082         if (Log2 < 3)
3083           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3084                              DAG.getConstant(3 - Log2, DL, VT));
3085         if (Log2 > 3)
3086           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3087                              DAG.getConstant(Log2 - 3, DL, VT));
3088         return VLENB;
3089       }
3090       // If the multiplier is a multiple of 8, scale it down to avoid needing
3091       // to shift the VLENB value.
3092       if ((Val % 8) == 0)
3093         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3094                            DAG.getConstant(Val / 8, DL, VT));
3095     }
3096 
3097     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3098                                  DAG.getConstant(3, DL, VT));
3099     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3100   }
3101   case ISD::FPOWI: {
3102     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3103     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3104     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3105         Op.getOperand(1).getValueType() == MVT::i32) {
3106       SDLoc DL(Op);
3107       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3108       SDValue Powi =
3109           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3110       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3111                          DAG.getIntPtrConstant(0, DL));
3112     }
3113     return SDValue();
3114   }
3115   case ISD::FP_EXTEND:
3116   case ISD::FP_ROUND:
3117     if (!Op.getValueType().isVector())
3118       return Op;
3119     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3120   case ISD::FP_TO_SINT:
3121   case ISD::FP_TO_UINT:
3122   case ISD::SINT_TO_FP:
3123   case ISD::UINT_TO_FP: {
3124     // RVV can only do fp<->int conversions to types half/double the size as
3125     // the source. We custom-lower any conversions that do two hops into
3126     // sequences.
3127     MVT VT = Op.getSimpleValueType();
3128     if (!VT.isVector())
3129       return Op;
3130     SDLoc DL(Op);
3131     SDValue Src = Op.getOperand(0);
3132     MVT EltVT = VT.getVectorElementType();
3133     MVT SrcVT = Src.getSimpleValueType();
3134     MVT SrcEltVT = SrcVT.getVectorElementType();
3135     unsigned EltSize = EltVT.getSizeInBits();
3136     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3137     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3138            "Unexpected vector element types");
3139 
3140     bool IsInt2FP = SrcEltVT.isInteger();
3141     // Widening conversions
3142     if (EltSize > (2 * SrcEltSize)) {
3143       if (IsInt2FP) {
3144         // Do a regular integer sign/zero extension then convert to float.
3145         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3146                                       VT.getVectorElementCount());
3147         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3148                                  ? ISD::ZERO_EXTEND
3149                                  : ISD::SIGN_EXTEND;
3150         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3151         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3152       }
3153       // FP2Int
3154       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3155       // Do one doubling fp_extend then complete the operation by converting
3156       // to int.
3157       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3158       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3159       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3160     }
3161 
3162     // Narrowing conversions
3163     if (SrcEltSize > (2 * EltSize)) {
3164       if (IsInt2FP) {
3165         // One narrowing int_to_fp, then an fp_round.
3166         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3167         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3168         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3169         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3170       }
3171       // FP2Int
3172       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3173       // representable by the integer, the result is poison.
3174       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3175                                     VT.getVectorElementCount());
3176       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3177       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3178     }
3179 
3180     // Scalable vectors can exit here. Patterns will handle equally-sized
3181     // conversions halving/doubling ones.
3182     if (!VT.isFixedLengthVector())
3183       return Op;
3184 
3185     // For fixed-length vectors we lower to a custom "VL" node.
3186     unsigned RVVOpc = 0;
3187     switch (Op.getOpcode()) {
3188     default:
3189       llvm_unreachable("Impossible opcode");
3190     case ISD::FP_TO_SINT:
3191       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3192       break;
3193     case ISD::FP_TO_UINT:
3194       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3195       break;
3196     case ISD::SINT_TO_FP:
3197       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3198       break;
3199     case ISD::UINT_TO_FP:
3200       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3201       break;
3202     }
3203 
3204     MVT ContainerVT, SrcContainerVT;
3205     // Derive the reference container type from the larger vector type.
3206     if (SrcEltSize > EltSize) {
3207       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3208       ContainerVT =
3209           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3210     } else {
3211       ContainerVT = getContainerForFixedLengthVector(VT);
3212       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3213     }
3214 
3215     SDValue Mask, VL;
3216     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3217 
3218     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3219     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3220     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3221   }
3222   case ISD::FP_TO_SINT_SAT:
3223   case ISD::FP_TO_UINT_SAT:
3224     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3225   case ISD::FTRUNC:
3226   case ISD::FCEIL:
3227   case ISD::FFLOOR:
3228     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3229   case ISD::FROUND:
3230     return lowerFROUND(Op, DAG);
3231   case ISD::VECREDUCE_ADD:
3232   case ISD::VECREDUCE_UMAX:
3233   case ISD::VECREDUCE_SMAX:
3234   case ISD::VECREDUCE_UMIN:
3235   case ISD::VECREDUCE_SMIN:
3236     return lowerVECREDUCE(Op, DAG);
3237   case ISD::VECREDUCE_AND:
3238   case ISD::VECREDUCE_OR:
3239   case ISD::VECREDUCE_XOR:
3240     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3241       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3242     return lowerVECREDUCE(Op, DAG);
3243   case ISD::VECREDUCE_FADD:
3244   case ISD::VECREDUCE_SEQ_FADD:
3245   case ISD::VECREDUCE_FMIN:
3246   case ISD::VECREDUCE_FMAX:
3247     return lowerFPVECREDUCE(Op, DAG);
3248   case ISD::VP_REDUCE_ADD:
3249   case ISD::VP_REDUCE_UMAX:
3250   case ISD::VP_REDUCE_SMAX:
3251   case ISD::VP_REDUCE_UMIN:
3252   case ISD::VP_REDUCE_SMIN:
3253   case ISD::VP_REDUCE_FADD:
3254   case ISD::VP_REDUCE_SEQ_FADD:
3255   case ISD::VP_REDUCE_FMIN:
3256   case ISD::VP_REDUCE_FMAX:
3257     return lowerVPREDUCE(Op, DAG);
3258   case ISD::VP_REDUCE_AND:
3259   case ISD::VP_REDUCE_OR:
3260   case ISD::VP_REDUCE_XOR:
3261     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3262       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3263     return lowerVPREDUCE(Op, DAG);
3264   case ISD::INSERT_SUBVECTOR:
3265     return lowerINSERT_SUBVECTOR(Op, DAG);
3266   case ISD::EXTRACT_SUBVECTOR:
3267     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3268   case ISD::STEP_VECTOR:
3269     return lowerSTEP_VECTOR(Op, DAG);
3270   case ISD::VECTOR_REVERSE:
3271     return lowerVECTOR_REVERSE(Op, DAG);
3272   case ISD::VECTOR_SPLICE:
3273     return lowerVECTOR_SPLICE(Op, DAG);
3274   case ISD::BUILD_VECTOR:
3275     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3276   case ISD::SPLAT_VECTOR:
3277     if (Op.getValueType().getVectorElementType() == MVT::i1)
3278       return lowerVectorMaskSplat(Op, DAG);
3279     return SDValue();
3280   case ISD::VECTOR_SHUFFLE:
3281     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3282   case ISD::CONCAT_VECTORS: {
3283     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3284     // better than going through the stack, as the default expansion does.
3285     SDLoc DL(Op);
3286     MVT VT = Op.getSimpleValueType();
3287     unsigned NumOpElts =
3288         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3289     SDValue Vec = DAG.getUNDEF(VT);
3290     for (const auto &OpIdx : enumerate(Op->ops())) {
3291       SDValue SubVec = OpIdx.value();
3292       // Don't insert undef subvectors.
3293       if (SubVec.isUndef())
3294         continue;
3295       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3296                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3297     }
3298     return Vec;
3299   }
3300   case ISD::LOAD:
3301     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3302       return V;
3303     if (Op.getValueType().isFixedLengthVector())
3304       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3305     return Op;
3306   case ISD::STORE:
3307     if (auto V = expandUnalignedRVVStore(Op, DAG))
3308       return V;
3309     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3310       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3311     return Op;
3312   case ISD::MLOAD:
3313   case ISD::VP_LOAD:
3314     return lowerMaskedLoad(Op, DAG);
3315   case ISD::MSTORE:
3316   case ISD::VP_STORE:
3317     return lowerMaskedStore(Op, DAG);
3318   case ISD::SETCC:
3319     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3320   case ISD::ADD:
3321     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3322   case ISD::SUB:
3323     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3324   case ISD::MUL:
3325     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3326   case ISD::MULHS:
3327     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3328   case ISD::MULHU:
3329     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3330   case ISD::AND:
3331     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3332                                               RISCVISD::AND_VL);
3333   case ISD::OR:
3334     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3335                                               RISCVISD::OR_VL);
3336   case ISD::XOR:
3337     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3338                                               RISCVISD::XOR_VL);
3339   case ISD::SDIV:
3340     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3341   case ISD::SREM:
3342     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3343   case ISD::UDIV:
3344     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3345   case ISD::UREM:
3346     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3347   case ISD::SHL:
3348   case ISD::SRA:
3349   case ISD::SRL:
3350     if (Op.getSimpleValueType().isFixedLengthVector())
3351       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3352     // This can be called for an i32 shift amount that needs to be promoted.
3353     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3354            "Unexpected custom legalisation");
3355     return SDValue();
3356   case ISD::SADDSAT:
3357     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3358   case ISD::UADDSAT:
3359     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3360   case ISD::SSUBSAT:
3361     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3362   case ISD::USUBSAT:
3363     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3364   case ISD::FADD:
3365     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3366   case ISD::FSUB:
3367     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3368   case ISD::FMUL:
3369     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3370   case ISD::FDIV:
3371     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3372   case ISD::FNEG:
3373     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3374   case ISD::FABS:
3375     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3376   case ISD::FSQRT:
3377     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3378   case ISD::FMA:
3379     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3380   case ISD::SMIN:
3381     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3382   case ISD::SMAX:
3383     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3384   case ISD::UMIN:
3385     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3386   case ISD::UMAX:
3387     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3388   case ISD::FMINNUM:
3389     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3390   case ISD::FMAXNUM:
3391     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3392   case ISD::ABS:
3393     return lowerABS(Op, DAG);
3394   case ISD::CTLZ_ZERO_UNDEF:
3395   case ISD::CTTZ_ZERO_UNDEF:
3396     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3397   case ISD::VSELECT:
3398     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3399   case ISD::FCOPYSIGN:
3400     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3401   case ISD::MGATHER:
3402   case ISD::VP_GATHER:
3403     return lowerMaskedGather(Op, DAG);
3404   case ISD::MSCATTER:
3405   case ISD::VP_SCATTER:
3406     return lowerMaskedScatter(Op, DAG);
3407   case ISD::FLT_ROUNDS_:
3408     return lowerGET_ROUNDING(Op, DAG);
3409   case ISD::SET_ROUNDING:
3410     return lowerSET_ROUNDING(Op, DAG);
3411   case ISD::VP_SELECT:
3412     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3413   case ISD::VP_MERGE:
3414     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3415   case ISD::VP_ADD:
3416     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3417   case ISD::VP_SUB:
3418     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3419   case ISD::VP_MUL:
3420     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3421   case ISD::VP_SDIV:
3422     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3423   case ISD::VP_UDIV:
3424     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3425   case ISD::VP_SREM:
3426     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3427   case ISD::VP_UREM:
3428     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3429   case ISD::VP_AND:
3430     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3431   case ISD::VP_OR:
3432     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3433   case ISD::VP_XOR:
3434     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3435   case ISD::VP_ASHR:
3436     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3437   case ISD::VP_LSHR:
3438     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3439   case ISD::VP_SHL:
3440     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3441   case ISD::VP_FADD:
3442     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3443   case ISD::VP_FSUB:
3444     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3445   case ISD::VP_FMUL:
3446     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3447   case ISD::VP_FDIV:
3448     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3449   case ISD::VP_FNEG:
3450     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3451   case ISD::VP_FMA:
3452     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3453   case ISD::VP_SIGN_EXTEND:
3454   case ISD::VP_ZERO_EXTEND:
3455     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3456       return lowerVPExtMaskOp(Op, DAG);
3457     return lowerVPOp(Op, DAG,
3458                      Op.getOpcode() == ISD::VP_SIGN_EXTEND
3459                          ? RISCVISD::VSEXT_VL
3460                          : RISCVISD::VZEXT_VL);
3461   case ISD::VP_TRUNCATE:
3462     return lowerVectorTruncLike(Op, DAG);
3463   case ISD::VP_FP_EXTEND:
3464   case ISD::VP_FP_ROUND:
3465     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3466   case ISD::VP_FPTOSI:
3467     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3468   case ISD::VP_FPTOUI:
3469     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3470   case ISD::VP_SITOFP:
3471     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3472   case ISD::VP_UITOFP:
3473     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3474   case ISD::VP_SETCC:
3475     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3476       return lowerVPSetCCMaskOp(Op, DAG);
3477     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3478   }
3479 }
3480 
3481 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3482                              SelectionDAG &DAG, unsigned Flags) {
3483   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3484 }
3485 
3486 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3487                              SelectionDAG &DAG, unsigned Flags) {
3488   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3489                                    Flags);
3490 }
3491 
3492 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3493                              SelectionDAG &DAG, unsigned Flags) {
3494   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3495                                    N->getOffset(), Flags);
3496 }
3497 
3498 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3499                              SelectionDAG &DAG, unsigned Flags) {
3500   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3501 }
3502 
3503 template <class NodeTy>
3504 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3505                                      bool IsLocal) const {
3506   SDLoc DL(N);
3507   EVT Ty = getPointerTy(DAG.getDataLayout());
3508 
3509   if (isPositionIndependent()) {
3510     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3511     if (IsLocal)
3512       // Use PC-relative addressing to access the symbol. This generates the
3513       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3514       // %pcrel_lo(auipc)).
3515       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3516 
3517     // Use PC-relative addressing to access the GOT for this symbol, then load
3518     // the address from the GOT. This generates the pattern (PseudoLA sym),
3519     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3520     SDValue Load =
3521         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3522     MachineFunction &MF = DAG.getMachineFunction();
3523     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3524         MachinePointerInfo::getGOT(MF),
3525         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3526             MachineMemOperand::MOInvariant,
3527         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3528     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3529     return Load;
3530   }
3531 
3532   switch (getTargetMachine().getCodeModel()) {
3533   default:
3534     report_fatal_error("Unsupported code model for lowering");
3535   case CodeModel::Small: {
3536     // Generate a sequence for accessing addresses within the first 2 GiB of
3537     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3538     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3539     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3540     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3541     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3542   }
3543   case CodeModel::Medium: {
3544     // Generate a sequence for accessing addresses within any 2GiB range within
3545     // the address space. This generates the pattern (PseudoLLA sym), which
3546     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3547     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3548     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3549   }
3550   }
3551 }
3552 
3553 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3554     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3555 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3556     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3557 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3558     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3559 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3560     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3561 
3562 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3563                                                 SelectionDAG &DAG) const {
3564   SDLoc DL(Op);
3565   EVT Ty = Op.getValueType();
3566   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3567   int64_t Offset = N->getOffset();
3568   MVT XLenVT = Subtarget.getXLenVT();
3569 
3570   const GlobalValue *GV = N->getGlobal();
3571   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3572   SDValue Addr = getAddr(N, DAG, IsLocal);
3573 
3574   // In order to maximise the opportunity for common subexpression elimination,
3575   // emit a separate ADD node for the global address offset instead of folding
3576   // it in the global address node. Later peephole optimisations may choose to
3577   // fold it back in when profitable.
3578   if (Offset != 0)
3579     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3580                        DAG.getConstant(Offset, DL, XLenVT));
3581   return Addr;
3582 }
3583 
3584 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3585                                                SelectionDAG &DAG) const {
3586   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3587 
3588   return getAddr(N, DAG);
3589 }
3590 
3591 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3592                                                SelectionDAG &DAG) const {
3593   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3594 
3595   return getAddr(N, DAG);
3596 }
3597 
3598 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3599                                             SelectionDAG &DAG) const {
3600   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3601 
3602   return getAddr(N, DAG);
3603 }
3604 
3605 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3606                                               SelectionDAG &DAG,
3607                                               bool UseGOT) const {
3608   SDLoc DL(N);
3609   EVT Ty = getPointerTy(DAG.getDataLayout());
3610   const GlobalValue *GV = N->getGlobal();
3611   MVT XLenVT = Subtarget.getXLenVT();
3612 
3613   if (UseGOT) {
3614     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3615     // load the address from the GOT and add the thread pointer. This generates
3616     // the pattern (PseudoLA_TLS_IE sym), which expands to
3617     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3618     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3619     SDValue Load =
3620         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3621     MachineFunction &MF = DAG.getMachineFunction();
3622     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3623         MachinePointerInfo::getGOT(MF),
3624         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3625             MachineMemOperand::MOInvariant,
3626         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3627     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3628 
3629     // Add the thread pointer.
3630     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3631     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3632   }
3633 
3634   // Generate a sequence for accessing the address relative to the thread
3635   // pointer, with the appropriate adjustment for the thread pointer offset.
3636   // This generates the pattern
3637   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3638   SDValue AddrHi =
3639       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3640   SDValue AddrAdd =
3641       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3642   SDValue AddrLo =
3643       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3644 
3645   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3646   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3647   SDValue MNAdd = SDValue(
3648       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3649       0);
3650   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3651 }
3652 
3653 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3654                                                SelectionDAG &DAG) const {
3655   SDLoc DL(N);
3656   EVT Ty = getPointerTy(DAG.getDataLayout());
3657   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3658   const GlobalValue *GV = N->getGlobal();
3659 
3660   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3661   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3662   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3663   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3664   SDValue Load =
3665       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3666 
3667   // Prepare argument list to generate call.
3668   ArgListTy Args;
3669   ArgListEntry Entry;
3670   Entry.Node = Load;
3671   Entry.Ty = CallTy;
3672   Args.push_back(Entry);
3673 
3674   // Setup call to __tls_get_addr.
3675   TargetLowering::CallLoweringInfo CLI(DAG);
3676   CLI.setDebugLoc(DL)
3677       .setChain(DAG.getEntryNode())
3678       .setLibCallee(CallingConv::C, CallTy,
3679                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3680                     std::move(Args));
3681 
3682   return LowerCallTo(CLI).first;
3683 }
3684 
3685 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3686                                                    SelectionDAG &DAG) const {
3687   SDLoc DL(Op);
3688   EVT Ty = Op.getValueType();
3689   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3690   int64_t Offset = N->getOffset();
3691   MVT XLenVT = Subtarget.getXLenVT();
3692 
3693   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3694 
3695   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3696       CallingConv::GHC)
3697     report_fatal_error("In GHC calling convention TLS is not supported");
3698 
3699   SDValue Addr;
3700   switch (Model) {
3701   case TLSModel::LocalExec:
3702     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3703     break;
3704   case TLSModel::InitialExec:
3705     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3706     break;
3707   case TLSModel::LocalDynamic:
3708   case TLSModel::GeneralDynamic:
3709     Addr = getDynamicTLSAddr(N, DAG);
3710     break;
3711   }
3712 
3713   // In order to maximise the opportunity for common subexpression elimination,
3714   // emit a separate ADD node for the global address offset instead of folding
3715   // it in the global address node. Later peephole optimisations may choose to
3716   // fold it back in when profitable.
3717   if (Offset != 0)
3718     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3719                        DAG.getConstant(Offset, DL, XLenVT));
3720   return Addr;
3721 }
3722 
3723 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3724   SDValue CondV = Op.getOperand(0);
3725   SDValue TrueV = Op.getOperand(1);
3726   SDValue FalseV = Op.getOperand(2);
3727   SDLoc DL(Op);
3728   MVT VT = Op.getSimpleValueType();
3729   MVT XLenVT = Subtarget.getXLenVT();
3730 
3731   // Lower vector SELECTs to VSELECTs by splatting the condition.
3732   if (VT.isVector()) {
3733     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3734     SDValue CondSplat = VT.isScalableVector()
3735                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3736                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3737     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3738   }
3739 
3740   // If the result type is XLenVT and CondV is the output of a SETCC node
3741   // which also operated on XLenVT inputs, then merge the SETCC node into the
3742   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3743   // compare+branch instructions. i.e.:
3744   // (select (setcc lhs, rhs, cc), truev, falsev)
3745   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3746   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3747       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3748     SDValue LHS = CondV.getOperand(0);
3749     SDValue RHS = CondV.getOperand(1);
3750     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3751     ISD::CondCode CCVal = CC->get();
3752 
3753     // Special case for a select of 2 constants that have a diffence of 1.
3754     // Normally this is done by DAGCombine, but if the select is introduced by
3755     // type legalization or op legalization, we miss it. Restricting to SETLT
3756     // case for now because that is what signed saturating add/sub need.
3757     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3758     // but we would probably want to swap the true/false values if the condition
3759     // is SETGE/SETLE to avoid an XORI.
3760     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3761         CCVal == ISD::SETLT) {
3762       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3763       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3764       if (TrueVal - 1 == FalseVal)
3765         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3766       if (TrueVal + 1 == FalseVal)
3767         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3768     }
3769 
3770     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3771 
3772     SDValue TargetCC = DAG.getCondCode(CCVal);
3773     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3774     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3775   }
3776 
3777   // Otherwise:
3778   // (select condv, truev, falsev)
3779   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3780   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3781   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3782 
3783   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3784 
3785   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3786 }
3787 
3788 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3789   SDValue CondV = Op.getOperand(1);
3790   SDLoc DL(Op);
3791   MVT XLenVT = Subtarget.getXLenVT();
3792 
3793   if (CondV.getOpcode() == ISD::SETCC &&
3794       CondV.getOperand(0).getValueType() == XLenVT) {
3795     SDValue LHS = CondV.getOperand(0);
3796     SDValue RHS = CondV.getOperand(1);
3797     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3798 
3799     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3800 
3801     SDValue TargetCC = DAG.getCondCode(CCVal);
3802     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3803                        LHS, RHS, TargetCC, Op.getOperand(2));
3804   }
3805 
3806   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3807                      CondV, DAG.getConstant(0, DL, XLenVT),
3808                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3809 }
3810 
3811 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3812   MachineFunction &MF = DAG.getMachineFunction();
3813   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3814 
3815   SDLoc DL(Op);
3816   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3817                                  getPointerTy(MF.getDataLayout()));
3818 
3819   // vastart just stores the address of the VarArgsFrameIndex slot into the
3820   // memory location argument.
3821   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3822   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3823                       MachinePointerInfo(SV));
3824 }
3825 
3826 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3827                                             SelectionDAG &DAG) const {
3828   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3829   MachineFunction &MF = DAG.getMachineFunction();
3830   MachineFrameInfo &MFI = MF.getFrameInfo();
3831   MFI.setFrameAddressIsTaken(true);
3832   Register FrameReg = RI.getFrameRegister(MF);
3833   int XLenInBytes = Subtarget.getXLen() / 8;
3834 
3835   EVT VT = Op.getValueType();
3836   SDLoc DL(Op);
3837   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3838   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3839   while (Depth--) {
3840     int Offset = -(XLenInBytes * 2);
3841     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3842                               DAG.getIntPtrConstant(Offset, DL));
3843     FrameAddr =
3844         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3845   }
3846   return FrameAddr;
3847 }
3848 
3849 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3850                                              SelectionDAG &DAG) const {
3851   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3852   MachineFunction &MF = DAG.getMachineFunction();
3853   MachineFrameInfo &MFI = MF.getFrameInfo();
3854   MFI.setReturnAddressIsTaken(true);
3855   MVT XLenVT = Subtarget.getXLenVT();
3856   int XLenInBytes = Subtarget.getXLen() / 8;
3857 
3858   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3859     return SDValue();
3860 
3861   EVT VT = Op.getValueType();
3862   SDLoc DL(Op);
3863   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3864   if (Depth) {
3865     int Off = -XLenInBytes;
3866     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3867     SDValue Offset = DAG.getConstant(Off, DL, VT);
3868     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3869                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3870                        MachinePointerInfo());
3871   }
3872 
3873   // Return the value of the return address register, marking it an implicit
3874   // live-in.
3875   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3876   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3877 }
3878 
3879 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3880                                                  SelectionDAG &DAG) const {
3881   SDLoc DL(Op);
3882   SDValue Lo = Op.getOperand(0);
3883   SDValue Hi = Op.getOperand(1);
3884   SDValue Shamt = Op.getOperand(2);
3885   EVT VT = Lo.getValueType();
3886 
3887   // if Shamt-XLEN < 0: // Shamt < XLEN
3888   //   Lo = Lo << Shamt
3889   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3890   // else:
3891   //   Lo = 0
3892   //   Hi = Lo << (Shamt-XLEN)
3893 
3894   SDValue Zero = DAG.getConstant(0, DL, VT);
3895   SDValue One = DAG.getConstant(1, DL, VT);
3896   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3897   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3898   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3899   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3900 
3901   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3902   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3903   SDValue ShiftRightLo =
3904       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3905   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3906   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3907   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3908 
3909   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3910 
3911   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3912   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3913 
3914   SDValue Parts[2] = {Lo, Hi};
3915   return DAG.getMergeValues(Parts, DL);
3916 }
3917 
3918 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3919                                                   bool IsSRA) const {
3920   SDLoc DL(Op);
3921   SDValue Lo = Op.getOperand(0);
3922   SDValue Hi = Op.getOperand(1);
3923   SDValue Shamt = Op.getOperand(2);
3924   EVT VT = Lo.getValueType();
3925 
3926   // SRA expansion:
3927   //   if Shamt-XLEN < 0: // Shamt < XLEN
3928   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3929   //     Hi = Hi >>s Shamt
3930   //   else:
3931   //     Lo = Hi >>s (Shamt-XLEN);
3932   //     Hi = Hi >>s (XLEN-1)
3933   //
3934   // SRL expansion:
3935   //   if Shamt-XLEN < 0: // Shamt < XLEN
3936   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3937   //     Hi = Hi >>u Shamt
3938   //   else:
3939   //     Lo = Hi >>u (Shamt-XLEN);
3940   //     Hi = 0;
3941 
3942   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3943 
3944   SDValue Zero = DAG.getConstant(0, DL, VT);
3945   SDValue One = DAG.getConstant(1, DL, VT);
3946   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3947   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3948   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3949   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3950 
3951   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3952   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3953   SDValue ShiftLeftHi =
3954       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3955   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3956   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3957   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3958   SDValue HiFalse =
3959       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3960 
3961   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3962 
3963   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3964   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3965 
3966   SDValue Parts[2] = {Lo, Hi};
3967   return DAG.getMergeValues(Parts, DL);
3968 }
3969 
3970 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3971 // legal equivalently-sized i8 type, so we can use that as a go-between.
3972 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3973                                                   SelectionDAG &DAG) const {
3974   SDLoc DL(Op);
3975   MVT VT = Op.getSimpleValueType();
3976   SDValue SplatVal = Op.getOperand(0);
3977   // All-zeros or all-ones splats are handled specially.
3978   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3979     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3980     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3981   }
3982   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3983     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3984     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3985   }
3986   MVT XLenVT = Subtarget.getXLenVT();
3987   assert(SplatVal.getValueType() == XLenVT &&
3988          "Unexpected type for i1 splat value");
3989   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3990   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3991                          DAG.getConstant(1, DL, XLenVT));
3992   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3993   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3994   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3995 }
3996 
3997 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3998 // illegal (currently only vXi64 RV32).
3999 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4000 // them to VMV_V_X_VL.
4001 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4002                                                      SelectionDAG &DAG) const {
4003   SDLoc DL(Op);
4004   MVT VecVT = Op.getSimpleValueType();
4005   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4006          "Unexpected SPLAT_VECTOR_PARTS lowering");
4007 
4008   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4009   SDValue Lo = Op.getOperand(0);
4010   SDValue Hi = Op.getOperand(1);
4011 
4012   if (VecVT.isFixedLengthVector()) {
4013     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4014     SDLoc DL(Op);
4015     SDValue Mask, VL;
4016     std::tie(Mask, VL) =
4017         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4018 
4019     SDValue Res =
4020         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4021     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4022   }
4023 
4024   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4025     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4026     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4027     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4028     // node in order to try and match RVV vector/scalar instructions.
4029     if ((LoC >> 31) == HiC)
4030       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4031                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4032   }
4033 
4034   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4035   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4036       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4037       Hi.getConstantOperandVal(1) == 31)
4038     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4039                        DAG.getRegister(RISCV::X0, MVT::i32));
4040 
4041   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4042   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4043                      DAG.getUNDEF(VecVT), Lo, Hi,
4044                      DAG.getRegister(RISCV::X0, MVT::i32));
4045 }
4046 
4047 // Custom-lower extensions from mask vectors by using a vselect either with 1
4048 // for zero/any-extension or -1 for sign-extension:
4049 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4050 // Note that any-extension is lowered identically to zero-extension.
4051 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4052                                                 int64_t ExtTrueVal) const {
4053   SDLoc DL(Op);
4054   MVT VecVT = Op.getSimpleValueType();
4055   SDValue Src = Op.getOperand(0);
4056   // Only custom-lower extensions from mask types
4057   assert(Src.getValueType().isVector() &&
4058          Src.getValueType().getVectorElementType() == MVT::i1);
4059 
4060   if (VecVT.isScalableVector()) {
4061     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4062     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4063     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4064   }
4065 
4066   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4067   MVT I1ContainerVT =
4068       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4069 
4070   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4071 
4072   SDValue Mask, VL;
4073   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4074 
4075   MVT XLenVT = Subtarget.getXLenVT();
4076   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4077   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4078 
4079   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4080                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4081   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4082                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4083   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4084                                SplatTrueVal, SplatZero, VL);
4085 
4086   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4087 }
4088 
4089 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4090     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4091   MVT ExtVT = Op.getSimpleValueType();
4092   // Only custom-lower extensions from fixed-length vector types.
4093   if (!ExtVT.isFixedLengthVector())
4094     return Op;
4095   MVT VT = Op.getOperand(0).getSimpleValueType();
4096   // Grab the canonical container type for the extended type. Infer the smaller
4097   // type from that to ensure the same number of vector elements, as we know
4098   // the LMUL will be sufficient to hold the smaller type.
4099   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4100   // Get the extended container type manually to ensure the same number of
4101   // vector elements between source and dest.
4102   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4103                                      ContainerExtVT.getVectorElementCount());
4104 
4105   SDValue Op1 =
4106       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4107 
4108   SDLoc DL(Op);
4109   SDValue Mask, VL;
4110   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4111 
4112   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4113 
4114   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4115 }
4116 
4117 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4118 // setcc operation:
4119 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4120 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4121                                                       SelectionDAG &DAG) const {
4122   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4123   SDLoc DL(Op);
4124   EVT MaskVT = Op.getValueType();
4125   // Only expect to custom-lower truncations to mask types
4126   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4127          "Unexpected type for vector mask lowering");
4128   SDValue Src = Op.getOperand(0);
4129   MVT VecVT = Src.getSimpleValueType();
4130   SDValue Mask, VL;
4131   if (IsVPTrunc) {
4132     Mask = Op.getOperand(1);
4133     VL = Op.getOperand(2);
4134   }
4135   // If this is a fixed vector, we need to convert it to a scalable vector.
4136   MVT ContainerVT = VecVT;
4137 
4138   if (VecVT.isFixedLengthVector()) {
4139     ContainerVT = getContainerForFixedLengthVector(VecVT);
4140     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4141     if (IsVPTrunc) {
4142       MVT MaskContainerVT =
4143           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4144       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4145     }
4146   }
4147 
4148   if (!IsVPTrunc) {
4149     std::tie(Mask, VL) =
4150         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4151   }
4152 
4153   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4154   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4155 
4156   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4157                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4158   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4159                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4160 
4161   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4162   SDValue Trunc =
4163       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4164   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4165                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4166   if (MaskVT.isFixedLengthVector())
4167     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4168   return Trunc;
4169 }
4170 
4171 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4172                                                   SelectionDAG &DAG) const {
4173   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4174   SDLoc DL(Op);
4175 
4176   MVT VT = Op.getSimpleValueType();
4177   // Only custom-lower vector truncates
4178   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4179 
4180   // Truncates to mask types are handled differently
4181   if (VT.getVectorElementType() == MVT::i1)
4182     return lowerVectorMaskTruncLike(Op, DAG);
4183 
4184   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4185   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4186   // truncate by one power of two at a time.
4187   MVT DstEltVT = VT.getVectorElementType();
4188 
4189   SDValue Src = Op.getOperand(0);
4190   MVT SrcVT = Src.getSimpleValueType();
4191   MVT SrcEltVT = SrcVT.getVectorElementType();
4192 
4193   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4194          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4195          "Unexpected vector truncate lowering");
4196 
4197   MVT ContainerVT = SrcVT;
4198   SDValue Mask, VL;
4199   if (IsVPTrunc) {
4200     Mask = Op.getOperand(1);
4201     VL = Op.getOperand(2);
4202   }
4203   if (SrcVT.isFixedLengthVector()) {
4204     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4205     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4206     if (IsVPTrunc) {
4207       MVT MaskVT = getMaskTypeFor(ContainerVT);
4208       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4209     }
4210   }
4211 
4212   SDValue Result = Src;
4213   if (!IsVPTrunc) {
4214     std::tie(Mask, VL) =
4215         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4216   }
4217 
4218   LLVMContext &Context = *DAG.getContext();
4219   const ElementCount Count = ContainerVT.getVectorElementCount();
4220   do {
4221     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4222     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4223     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4224                          Mask, VL);
4225   } while (SrcEltVT != DstEltVT);
4226 
4227   if (SrcVT.isFixedLengthVector())
4228     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4229 
4230   return Result;
4231 }
4232 
4233 SDValue
4234 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
4235                                                     SelectionDAG &DAG) const {
4236   bool IsVP =
4237       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
4238   bool IsExtend =
4239       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
4240   // RVV can only do truncate fp to types half the size as the source. We
4241   // custom-lower f64->f16 rounds via RVV's round-to-odd float
4242   // conversion instruction.
4243   SDLoc DL(Op);
4244   MVT VT = Op.getSimpleValueType();
4245 
4246   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4247 
4248   SDValue Src = Op.getOperand(0);
4249   MVT SrcVT = Src.getSimpleValueType();
4250 
4251   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
4252                                      SrcVT.getVectorElementType() != MVT::f16);
4253   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
4254                                      SrcVT.getVectorElementType() != MVT::f64);
4255 
4256   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
4257 
4258   // For FP_ROUND/FP_EXTEND of scalable vectors, leave it to the pattern.
4259   if (!VT.isFixedLengthVector() && !IsVP && IsDirectConv)
4260     return Op;
4261 
4262   // Prepare any fixed-length vector operands.
4263   MVT ContainerVT = VT;
4264   SDValue Mask, VL;
4265   if (IsVP) {
4266     Mask = Op.getOperand(1);
4267     VL = Op.getOperand(2);
4268   }
4269   if (VT.isFixedLengthVector()) {
4270     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4271     ContainerVT =
4272         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4273     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4274     if (IsVP) {
4275       MVT MaskVT = getMaskTypeFor(ContainerVT);
4276       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4277     }
4278   }
4279 
4280   if (!IsVP)
4281     std::tie(Mask, VL) =
4282         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4283 
4284   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
4285 
4286   if (IsDirectConv) {
4287     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
4288     if (VT.isFixedLengthVector())
4289       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4290     return Src;
4291   }
4292 
4293   unsigned InterConvOpc =
4294       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
4295 
4296   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4297   SDValue IntermediateConv =
4298       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
4299   SDValue Result =
4300       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
4301   if (VT.isFixedLengthVector())
4302     return convertFromScalableVector(VT, Result, DAG, Subtarget);
4303   return Result;
4304 }
4305 
4306 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4307 // first position of a vector, and that vector is slid up to the insert index.
4308 // By limiting the active vector length to index+1 and merging with the
4309 // original vector (with an undisturbed tail policy for elements >= VL), we
4310 // achieve the desired result of leaving all elements untouched except the one
4311 // at VL-1, which is replaced with the desired value.
4312 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4313                                                     SelectionDAG &DAG) const {
4314   SDLoc DL(Op);
4315   MVT VecVT = Op.getSimpleValueType();
4316   SDValue Vec = Op.getOperand(0);
4317   SDValue Val = Op.getOperand(1);
4318   SDValue Idx = Op.getOperand(2);
4319 
4320   if (VecVT.getVectorElementType() == MVT::i1) {
4321     // FIXME: For now we just promote to an i8 vector and insert into that,
4322     // but this is probably not optimal.
4323     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4324     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4325     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4326     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4327   }
4328 
4329   MVT ContainerVT = VecVT;
4330   // If the operand is a fixed-length vector, convert to a scalable one.
4331   if (VecVT.isFixedLengthVector()) {
4332     ContainerVT = getContainerForFixedLengthVector(VecVT);
4333     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4334   }
4335 
4336   MVT XLenVT = Subtarget.getXLenVT();
4337 
4338   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4339   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4340   // Even i64-element vectors on RV32 can be lowered without scalar
4341   // legalization if the most-significant 32 bits of the value are not affected
4342   // by the sign-extension of the lower 32 bits.
4343   // TODO: We could also catch sign extensions of a 32-bit value.
4344   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4345     const auto *CVal = cast<ConstantSDNode>(Val);
4346     if (isInt<32>(CVal->getSExtValue())) {
4347       IsLegalInsert = true;
4348       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4349     }
4350   }
4351 
4352   SDValue Mask, VL;
4353   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4354 
4355   SDValue ValInVec;
4356 
4357   if (IsLegalInsert) {
4358     unsigned Opc =
4359         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4360     if (isNullConstant(Idx)) {
4361       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4362       if (!VecVT.isFixedLengthVector())
4363         return Vec;
4364       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4365     }
4366     ValInVec =
4367         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4368   } else {
4369     // On RV32, i64-element vectors must be specially handled to place the
4370     // value at element 0, by using two vslide1up instructions in sequence on
4371     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4372     // this.
4373     SDValue One = DAG.getConstant(1, DL, XLenVT);
4374     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4375     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4376     MVT I32ContainerVT =
4377         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4378     SDValue I32Mask =
4379         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4380     // Limit the active VL to two.
4381     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4382     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4383     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4384     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4385                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4386     // First slide in the hi value, then the lo in underneath it.
4387     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4388                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4389                            I32Mask, InsertI64VL);
4390     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4391                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4392                            I32Mask, InsertI64VL);
4393     // Bitcast back to the right container type.
4394     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4395   }
4396 
4397   // Now that the value is in a vector, slide it into position.
4398   SDValue InsertVL =
4399       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4400   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4401                                 ValInVec, Idx, Mask, InsertVL);
4402   if (!VecVT.isFixedLengthVector())
4403     return Slideup;
4404   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4405 }
4406 
4407 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4408 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4409 // types this is done using VMV_X_S to allow us to glean information about the
4410 // sign bits of the result.
4411 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4412                                                      SelectionDAG &DAG) const {
4413   SDLoc DL(Op);
4414   SDValue Idx = Op.getOperand(1);
4415   SDValue Vec = Op.getOperand(0);
4416   EVT EltVT = Op.getValueType();
4417   MVT VecVT = Vec.getSimpleValueType();
4418   MVT XLenVT = Subtarget.getXLenVT();
4419 
4420   if (VecVT.getVectorElementType() == MVT::i1) {
4421     if (VecVT.isFixedLengthVector()) {
4422       unsigned NumElts = VecVT.getVectorNumElements();
4423       if (NumElts >= 8) {
4424         MVT WideEltVT;
4425         unsigned WidenVecLen;
4426         SDValue ExtractElementIdx;
4427         SDValue ExtractBitIdx;
4428         unsigned MaxEEW = Subtarget.getELEN();
4429         MVT LargestEltVT = MVT::getIntegerVT(
4430             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4431         if (NumElts <= LargestEltVT.getSizeInBits()) {
4432           assert(isPowerOf2_32(NumElts) &&
4433                  "the number of elements should be power of 2");
4434           WideEltVT = MVT::getIntegerVT(NumElts);
4435           WidenVecLen = 1;
4436           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4437           ExtractBitIdx = Idx;
4438         } else {
4439           WideEltVT = LargestEltVT;
4440           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4441           // extract element index = index / element width
4442           ExtractElementIdx = DAG.getNode(
4443               ISD::SRL, DL, XLenVT, Idx,
4444               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4445           // mask bit index = index % element width
4446           ExtractBitIdx = DAG.getNode(
4447               ISD::AND, DL, XLenVT, Idx,
4448               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4449         }
4450         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4451         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4452         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4453                                          Vec, ExtractElementIdx);
4454         // Extract the bit from GPR.
4455         SDValue ShiftRight =
4456             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4457         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4458                            DAG.getConstant(1, DL, XLenVT));
4459       }
4460     }
4461     // Otherwise, promote to an i8 vector and extract from that.
4462     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4463     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4464     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4465   }
4466 
4467   // If this is a fixed vector, we need to convert it to a scalable vector.
4468   MVT ContainerVT = VecVT;
4469   if (VecVT.isFixedLengthVector()) {
4470     ContainerVT = getContainerForFixedLengthVector(VecVT);
4471     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4472   }
4473 
4474   // If the index is 0, the vector is already in the right position.
4475   if (!isNullConstant(Idx)) {
4476     // Use a VL of 1 to avoid processing more elements than we need.
4477     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4478     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4479     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4480                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4481   }
4482 
4483   if (!EltVT.isInteger()) {
4484     // Floating-point extracts are handled in TableGen.
4485     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4486                        DAG.getConstant(0, DL, XLenVT));
4487   }
4488 
4489   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4490   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4491 }
4492 
4493 // Some RVV intrinsics may claim that they want an integer operand to be
4494 // promoted or expanded.
4495 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4496                                            const RISCVSubtarget &Subtarget) {
4497   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4498           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4499          "Unexpected opcode");
4500 
4501   if (!Subtarget.hasVInstructions())
4502     return SDValue();
4503 
4504   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4505   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4506   SDLoc DL(Op);
4507 
4508   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4509       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4510   if (!II || !II->hasScalarOperand())
4511     return SDValue();
4512 
4513   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4514   assert(SplatOp < Op.getNumOperands());
4515 
4516   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4517   SDValue &ScalarOp = Operands[SplatOp];
4518   MVT OpVT = ScalarOp.getSimpleValueType();
4519   MVT XLenVT = Subtarget.getXLenVT();
4520 
4521   // If this isn't a scalar, or its type is XLenVT we're done.
4522   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4523     return SDValue();
4524 
4525   // Simplest case is that the operand needs to be promoted to XLenVT.
4526   if (OpVT.bitsLT(XLenVT)) {
4527     // If the operand is a constant, sign extend to increase our chances
4528     // of being able to use a .vi instruction. ANY_EXTEND would become a
4529     // a zero extend and the simm5 check in isel would fail.
4530     // FIXME: Should we ignore the upper bits in isel instead?
4531     unsigned ExtOpc =
4532         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4533     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4534     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4535   }
4536 
4537   // Use the previous operand to get the vXi64 VT. The result might be a mask
4538   // VT for compares. Using the previous operand assumes that the previous
4539   // operand will never have a smaller element size than a scalar operand and
4540   // that a widening operation never uses SEW=64.
4541   // NOTE: If this fails the below assert, we can probably just find the
4542   // element count from any operand or result and use it to construct the VT.
4543   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4544   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4545 
4546   // The more complex case is when the scalar is larger than XLenVT.
4547   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4548          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4549 
4550   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4551   // instruction to sign-extend since SEW>XLEN.
4552   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4553     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4554     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4555   }
4556 
4557   switch (IntNo) {
4558   case Intrinsic::riscv_vslide1up:
4559   case Intrinsic::riscv_vslide1down:
4560   case Intrinsic::riscv_vslide1up_mask:
4561   case Intrinsic::riscv_vslide1down_mask: {
4562     // We need to special case these when the scalar is larger than XLen.
4563     unsigned NumOps = Op.getNumOperands();
4564     bool IsMasked = NumOps == 7;
4565 
4566     // Convert the vector source to the equivalent nxvXi32 vector.
4567     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4568     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4569 
4570     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4571                                    DAG.getConstant(0, DL, XLenVT));
4572     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4573                                    DAG.getConstant(1, DL, XLenVT));
4574 
4575     // Double the VL since we halved SEW.
4576     SDValue AVL = getVLOperand(Op);
4577     SDValue I32VL;
4578 
4579     // Optimize for constant AVL
4580     if (isa<ConstantSDNode>(AVL)) {
4581       unsigned EltSize = VT.getScalarSizeInBits();
4582       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4583 
4584       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4585       unsigned MaxVLMAX =
4586           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4587 
4588       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4589       unsigned MinVLMAX =
4590           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4591 
4592       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4593       if (AVLInt <= MinVLMAX) {
4594         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4595       } else if (AVLInt >= 2 * MaxVLMAX) {
4596         // Just set vl to VLMAX in this situation
4597         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4598         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4599         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4600         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4601         SDValue SETVLMAX = DAG.getTargetConstant(
4602             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4603         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4604                             LMUL);
4605       } else {
4606         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4607         // is related to the hardware implementation.
4608         // So let the following code handle
4609       }
4610     }
4611     if (!I32VL) {
4612       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4613       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4614       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4615       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4616       SDValue SETVL =
4617           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4618       // Using vsetvli instruction to get actually used length which related to
4619       // the hardware implementation
4620       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4621                                SEW, LMUL);
4622       I32VL =
4623           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4624     }
4625 
4626     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4627 
4628     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4629     // instructions.
4630     SDValue Passthru;
4631     if (IsMasked)
4632       Passthru = DAG.getUNDEF(I32VT);
4633     else
4634       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4635 
4636     if (IntNo == Intrinsic::riscv_vslide1up ||
4637         IntNo == Intrinsic::riscv_vslide1up_mask) {
4638       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4639                         ScalarHi, I32Mask, I32VL);
4640       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4641                         ScalarLo, I32Mask, I32VL);
4642     } else {
4643       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4644                         ScalarLo, I32Mask, I32VL);
4645       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4646                         ScalarHi, I32Mask, I32VL);
4647     }
4648 
4649     // Convert back to nxvXi64.
4650     Vec = DAG.getBitcast(VT, Vec);
4651 
4652     if (!IsMasked)
4653       return Vec;
4654     // Apply mask after the operation.
4655     SDValue Mask = Operands[NumOps - 3];
4656     SDValue MaskedOff = Operands[1];
4657     // Assume Policy operand is the last operand.
4658     uint64_t Policy =
4659         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4660     // We don't need to select maskedoff if it's undef.
4661     if (MaskedOff.isUndef())
4662       return Vec;
4663     // TAMU
4664     if (Policy == RISCVII::TAIL_AGNOSTIC)
4665       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4666                          AVL);
4667     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4668     // It's fine because vmerge does not care mask policy.
4669     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4670                        AVL);
4671   }
4672   }
4673 
4674   // We need to convert the scalar to a splat vector.
4675   SDValue VL = getVLOperand(Op);
4676   assert(VL.getValueType() == XLenVT);
4677   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4678   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4679 }
4680 
4681 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4682                                                      SelectionDAG &DAG) const {
4683   unsigned IntNo = Op.getConstantOperandVal(0);
4684   SDLoc DL(Op);
4685   MVT XLenVT = Subtarget.getXLenVT();
4686 
4687   switch (IntNo) {
4688   default:
4689     break; // Don't custom lower most intrinsics.
4690   case Intrinsic::thread_pointer: {
4691     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4692     return DAG.getRegister(RISCV::X4, PtrVT);
4693   }
4694   case Intrinsic::riscv_orc_b:
4695   case Intrinsic::riscv_brev8: {
4696     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4697     unsigned Opc =
4698         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4699     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4700                        DAG.getConstant(7, DL, XLenVT));
4701   }
4702   case Intrinsic::riscv_grev:
4703   case Intrinsic::riscv_gorc: {
4704     unsigned Opc =
4705         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4706     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4707   }
4708   case Intrinsic::riscv_zip:
4709   case Intrinsic::riscv_unzip: {
4710     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4711     // For i32 the immediate is 15. For i64 the immediate is 31.
4712     unsigned Opc =
4713         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4714     unsigned BitWidth = Op.getValueSizeInBits();
4715     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4716     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4717                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4718   }
4719   case Intrinsic::riscv_shfl:
4720   case Intrinsic::riscv_unshfl: {
4721     unsigned Opc =
4722         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4723     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4724   }
4725   case Intrinsic::riscv_bcompress:
4726   case Intrinsic::riscv_bdecompress: {
4727     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4728                                                        : RISCVISD::BDECOMPRESS;
4729     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4730   }
4731   case Intrinsic::riscv_bfp:
4732     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4733                        Op.getOperand(2));
4734   case Intrinsic::riscv_fsl:
4735     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4736                        Op.getOperand(2), Op.getOperand(3));
4737   case Intrinsic::riscv_fsr:
4738     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4739                        Op.getOperand(2), Op.getOperand(3));
4740   case Intrinsic::riscv_vmv_x_s:
4741     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4742     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4743                        Op.getOperand(1));
4744   case Intrinsic::riscv_vmv_v_x:
4745     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4746                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4747                             Subtarget);
4748   case Intrinsic::riscv_vfmv_v_f:
4749     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4750                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4751   case Intrinsic::riscv_vmv_s_x: {
4752     SDValue Scalar = Op.getOperand(2);
4753 
4754     if (Scalar.getValueType().bitsLE(XLenVT)) {
4755       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4756       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4757                          Op.getOperand(1), Scalar, Op.getOperand(3));
4758     }
4759 
4760     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4761 
4762     // This is an i64 value that lives in two scalar registers. We have to
4763     // insert this in a convoluted way. First we build vXi64 splat containing
4764     // the two values that we assemble using some bit math. Next we'll use
4765     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4766     // to merge element 0 from our splat into the source vector.
4767     // FIXME: This is probably not the best way to do this, but it is
4768     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4769     // point.
4770     //   sw lo, (a0)
4771     //   sw hi, 4(a0)
4772     //   vlse vX, (a0)
4773     //
4774     //   vid.v      vVid
4775     //   vmseq.vx   mMask, vVid, 0
4776     //   vmerge.vvm vDest, vSrc, vVal, mMask
4777     MVT VT = Op.getSimpleValueType();
4778     SDValue Vec = Op.getOperand(1);
4779     SDValue VL = getVLOperand(Op);
4780 
4781     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4782     if (Op.getOperand(1).isUndef())
4783       return SplattedVal;
4784     SDValue SplattedIdx =
4785         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4786                     DAG.getConstant(0, DL, MVT::i32), VL);
4787 
4788     MVT MaskVT = getMaskTypeFor(VT);
4789     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4790     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4791     SDValue SelectCond =
4792         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4793                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4794     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4795                        Vec, VL);
4796   }
4797   }
4798 
4799   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4800 }
4801 
4802 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4803                                                     SelectionDAG &DAG) const {
4804   unsigned IntNo = Op.getConstantOperandVal(1);
4805   switch (IntNo) {
4806   default:
4807     break;
4808   case Intrinsic::riscv_masked_strided_load: {
4809     SDLoc DL(Op);
4810     MVT XLenVT = Subtarget.getXLenVT();
4811 
4812     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4813     // the selection of the masked intrinsics doesn't do this for us.
4814     SDValue Mask = Op.getOperand(5);
4815     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4816 
4817     MVT VT = Op->getSimpleValueType(0);
4818     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4819 
4820     SDValue PassThru = Op.getOperand(2);
4821     if (!IsUnmasked) {
4822       MVT MaskVT = getMaskTypeFor(ContainerVT);
4823       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4824       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4825     }
4826 
4827     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4828 
4829     SDValue IntID = DAG.getTargetConstant(
4830         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4831         XLenVT);
4832 
4833     auto *Load = cast<MemIntrinsicSDNode>(Op);
4834     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4835     if (IsUnmasked)
4836       Ops.push_back(DAG.getUNDEF(ContainerVT));
4837     else
4838       Ops.push_back(PassThru);
4839     Ops.push_back(Op.getOperand(3)); // Ptr
4840     Ops.push_back(Op.getOperand(4)); // Stride
4841     if (!IsUnmasked)
4842       Ops.push_back(Mask);
4843     Ops.push_back(VL);
4844     if (!IsUnmasked) {
4845       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4846       Ops.push_back(Policy);
4847     }
4848 
4849     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4850     SDValue Result =
4851         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4852                                 Load->getMemoryVT(), Load->getMemOperand());
4853     SDValue Chain = Result.getValue(1);
4854     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4855     return DAG.getMergeValues({Result, Chain}, DL);
4856   }
4857   case Intrinsic::riscv_seg2_load:
4858   case Intrinsic::riscv_seg3_load:
4859   case Intrinsic::riscv_seg4_load:
4860   case Intrinsic::riscv_seg5_load:
4861   case Intrinsic::riscv_seg6_load:
4862   case Intrinsic::riscv_seg7_load:
4863   case Intrinsic::riscv_seg8_load: {
4864     SDLoc DL(Op);
4865     static const Intrinsic::ID VlsegInts[7] = {
4866         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4867         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4868         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4869         Intrinsic::riscv_vlseg8};
4870     unsigned NF = Op->getNumValues() - 1;
4871     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4872     MVT XLenVT = Subtarget.getXLenVT();
4873     MVT VT = Op->getSimpleValueType(0);
4874     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4875 
4876     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4877     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4878     auto *Load = cast<MemIntrinsicSDNode>(Op);
4879     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4880     ContainerVTs.push_back(MVT::Other);
4881     SDVTList VTs = DAG.getVTList(ContainerVTs);
4882     SDValue Result =
4883         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4884                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4885                                 Load->getMemoryVT(), Load->getMemOperand());
4886     SmallVector<SDValue, 9> Results;
4887     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4888       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4889                                                   DAG, Subtarget));
4890     Results.push_back(Result.getValue(NF));
4891     return DAG.getMergeValues(Results, DL);
4892   }
4893   }
4894 
4895   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4896 }
4897 
4898 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4899                                                  SelectionDAG &DAG) const {
4900   unsigned IntNo = Op.getConstantOperandVal(1);
4901   switch (IntNo) {
4902   default:
4903     break;
4904   case Intrinsic::riscv_masked_strided_store: {
4905     SDLoc DL(Op);
4906     MVT XLenVT = Subtarget.getXLenVT();
4907 
4908     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4909     // the selection of the masked intrinsics doesn't do this for us.
4910     SDValue Mask = Op.getOperand(5);
4911     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4912 
4913     SDValue Val = Op.getOperand(2);
4914     MVT VT = Val.getSimpleValueType();
4915     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4916 
4917     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4918     if (!IsUnmasked) {
4919       MVT MaskVT = getMaskTypeFor(ContainerVT);
4920       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4921     }
4922 
4923     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4924 
4925     SDValue IntID = DAG.getTargetConstant(
4926         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4927         XLenVT);
4928 
4929     auto *Store = cast<MemIntrinsicSDNode>(Op);
4930     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4931     Ops.push_back(Val);
4932     Ops.push_back(Op.getOperand(3)); // Ptr
4933     Ops.push_back(Op.getOperand(4)); // Stride
4934     if (!IsUnmasked)
4935       Ops.push_back(Mask);
4936     Ops.push_back(VL);
4937 
4938     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4939                                    Ops, Store->getMemoryVT(),
4940                                    Store->getMemOperand());
4941   }
4942   }
4943 
4944   return SDValue();
4945 }
4946 
4947 static MVT getLMUL1VT(MVT VT) {
4948   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4949          "Unexpected vector MVT");
4950   return MVT::getScalableVectorVT(
4951       VT.getVectorElementType(),
4952       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4953 }
4954 
4955 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4956   switch (ISDOpcode) {
4957   default:
4958     llvm_unreachable("Unhandled reduction");
4959   case ISD::VECREDUCE_ADD:
4960     return RISCVISD::VECREDUCE_ADD_VL;
4961   case ISD::VECREDUCE_UMAX:
4962     return RISCVISD::VECREDUCE_UMAX_VL;
4963   case ISD::VECREDUCE_SMAX:
4964     return RISCVISD::VECREDUCE_SMAX_VL;
4965   case ISD::VECREDUCE_UMIN:
4966     return RISCVISD::VECREDUCE_UMIN_VL;
4967   case ISD::VECREDUCE_SMIN:
4968     return RISCVISD::VECREDUCE_SMIN_VL;
4969   case ISD::VECREDUCE_AND:
4970     return RISCVISD::VECREDUCE_AND_VL;
4971   case ISD::VECREDUCE_OR:
4972     return RISCVISD::VECREDUCE_OR_VL;
4973   case ISD::VECREDUCE_XOR:
4974     return RISCVISD::VECREDUCE_XOR_VL;
4975   }
4976 }
4977 
4978 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4979                                                          SelectionDAG &DAG,
4980                                                          bool IsVP) const {
4981   SDLoc DL(Op);
4982   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4983   MVT VecVT = Vec.getSimpleValueType();
4984   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4985           Op.getOpcode() == ISD::VECREDUCE_OR ||
4986           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4987           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4988           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4989           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4990          "Unexpected reduction lowering");
4991 
4992   MVT XLenVT = Subtarget.getXLenVT();
4993   assert(Op.getValueType() == XLenVT &&
4994          "Expected reduction output to be legalized to XLenVT");
4995 
4996   MVT ContainerVT = VecVT;
4997   if (VecVT.isFixedLengthVector()) {
4998     ContainerVT = getContainerForFixedLengthVector(VecVT);
4999     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5000   }
5001 
5002   SDValue Mask, VL;
5003   if (IsVP) {
5004     Mask = Op.getOperand(2);
5005     VL = Op.getOperand(3);
5006   } else {
5007     std::tie(Mask, VL) =
5008         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5009   }
5010 
5011   unsigned BaseOpc;
5012   ISD::CondCode CC;
5013   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5014 
5015   switch (Op.getOpcode()) {
5016   default:
5017     llvm_unreachable("Unhandled reduction");
5018   case ISD::VECREDUCE_AND:
5019   case ISD::VP_REDUCE_AND: {
5020     // vcpop ~x == 0
5021     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5022     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5023     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5024     CC = ISD::SETEQ;
5025     BaseOpc = ISD::AND;
5026     break;
5027   }
5028   case ISD::VECREDUCE_OR:
5029   case ISD::VP_REDUCE_OR:
5030     // vcpop x != 0
5031     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5032     CC = ISD::SETNE;
5033     BaseOpc = ISD::OR;
5034     break;
5035   case ISD::VECREDUCE_XOR:
5036   case ISD::VP_REDUCE_XOR: {
5037     // ((vcpop x) & 1) != 0
5038     SDValue One = DAG.getConstant(1, DL, XLenVT);
5039     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5040     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5041     CC = ISD::SETNE;
5042     BaseOpc = ISD::XOR;
5043     break;
5044   }
5045   }
5046 
5047   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5048 
5049   if (!IsVP)
5050     return SetCC;
5051 
5052   // Now include the start value in the operation.
5053   // Note that we must return the start value when no elements are operated
5054   // upon. The vcpop instructions we've emitted in each case above will return
5055   // 0 for an inactive vector, and so we've already received the neutral value:
5056   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5057   // can simply include the start value.
5058   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5059 }
5060 
5061 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5062                                             SelectionDAG &DAG) const {
5063   SDLoc DL(Op);
5064   SDValue Vec = Op.getOperand(0);
5065   EVT VecEVT = Vec.getValueType();
5066 
5067   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5068 
5069   // Due to ordering in legalize types we may have a vector type that needs to
5070   // be split. Do that manually so we can get down to a legal type.
5071   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5072          TargetLowering::TypeSplitVector) {
5073     SDValue Lo, Hi;
5074     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5075     VecEVT = Lo.getValueType();
5076     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5077   }
5078 
5079   // TODO: The type may need to be widened rather than split. Or widened before
5080   // it can be split.
5081   if (!isTypeLegal(VecEVT))
5082     return SDValue();
5083 
5084   MVT VecVT = VecEVT.getSimpleVT();
5085   MVT VecEltVT = VecVT.getVectorElementType();
5086   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5087 
5088   MVT ContainerVT = VecVT;
5089   if (VecVT.isFixedLengthVector()) {
5090     ContainerVT = getContainerForFixedLengthVector(VecVT);
5091     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5092   }
5093 
5094   MVT M1VT = getLMUL1VT(ContainerVT);
5095   MVT XLenVT = Subtarget.getXLenVT();
5096 
5097   SDValue Mask, VL;
5098   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5099 
5100   SDValue NeutralElem =
5101       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5102   SDValue IdentitySplat =
5103       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5104                        M1VT, DL, DAG, Subtarget);
5105   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5106                                   IdentitySplat, Mask, VL);
5107   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5108                              DAG.getConstant(0, DL, XLenVT));
5109   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5110 }
5111 
5112 // Given a reduction op, this function returns the matching reduction opcode,
5113 // the vector SDValue and the scalar SDValue required to lower this to a
5114 // RISCVISD node.
5115 static std::tuple<unsigned, SDValue, SDValue>
5116 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5117   SDLoc DL(Op);
5118   auto Flags = Op->getFlags();
5119   unsigned Opcode = Op.getOpcode();
5120   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5121   switch (Opcode) {
5122   default:
5123     llvm_unreachable("Unhandled reduction");
5124   case ISD::VECREDUCE_FADD: {
5125     // Use positive zero if we can. It is cheaper to materialize.
5126     SDValue Zero =
5127         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5128     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5129   }
5130   case ISD::VECREDUCE_SEQ_FADD:
5131     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5132                            Op.getOperand(0));
5133   case ISD::VECREDUCE_FMIN:
5134     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5135                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5136   case ISD::VECREDUCE_FMAX:
5137     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5138                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5139   }
5140 }
5141 
5142 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5143                                               SelectionDAG &DAG) const {
5144   SDLoc DL(Op);
5145   MVT VecEltVT = Op.getSimpleValueType();
5146 
5147   unsigned RVVOpcode;
5148   SDValue VectorVal, ScalarVal;
5149   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5150       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5151   MVT VecVT = VectorVal.getSimpleValueType();
5152 
5153   MVT ContainerVT = VecVT;
5154   if (VecVT.isFixedLengthVector()) {
5155     ContainerVT = getContainerForFixedLengthVector(VecVT);
5156     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5157   }
5158 
5159   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5160   MVT XLenVT = Subtarget.getXLenVT();
5161 
5162   SDValue Mask, VL;
5163   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5164 
5165   SDValue ScalarSplat =
5166       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5167                        M1VT, DL, DAG, Subtarget);
5168   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5169                                   VectorVal, ScalarSplat, Mask, VL);
5170   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5171                      DAG.getConstant(0, DL, XLenVT));
5172 }
5173 
5174 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5175   switch (ISDOpcode) {
5176   default:
5177     llvm_unreachable("Unhandled reduction");
5178   case ISD::VP_REDUCE_ADD:
5179     return RISCVISD::VECREDUCE_ADD_VL;
5180   case ISD::VP_REDUCE_UMAX:
5181     return RISCVISD::VECREDUCE_UMAX_VL;
5182   case ISD::VP_REDUCE_SMAX:
5183     return RISCVISD::VECREDUCE_SMAX_VL;
5184   case ISD::VP_REDUCE_UMIN:
5185     return RISCVISD::VECREDUCE_UMIN_VL;
5186   case ISD::VP_REDUCE_SMIN:
5187     return RISCVISD::VECREDUCE_SMIN_VL;
5188   case ISD::VP_REDUCE_AND:
5189     return RISCVISD::VECREDUCE_AND_VL;
5190   case ISD::VP_REDUCE_OR:
5191     return RISCVISD::VECREDUCE_OR_VL;
5192   case ISD::VP_REDUCE_XOR:
5193     return RISCVISD::VECREDUCE_XOR_VL;
5194   case ISD::VP_REDUCE_FADD:
5195     return RISCVISD::VECREDUCE_FADD_VL;
5196   case ISD::VP_REDUCE_SEQ_FADD:
5197     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5198   case ISD::VP_REDUCE_FMAX:
5199     return RISCVISD::VECREDUCE_FMAX_VL;
5200   case ISD::VP_REDUCE_FMIN:
5201     return RISCVISD::VECREDUCE_FMIN_VL;
5202   }
5203 }
5204 
5205 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5206                                            SelectionDAG &DAG) const {
5207   SDLoc DL(Op);
5208   SDValue Vec = Op.getOperand(1);
5209   EVT VecEVT = Vec.getValueType();
5210 
5211   // TODO: The type may need to be widened rather than split. Or widened before
5212   // it can be split.
5213   if (!isTypeLegal(VecEVT))
5214     return SDValue();
5215 
5216   MVT VecVT = VecEVT.getSimpleVT();
5217   MVT VecEltVT = VecVT.getVectorElementType();
5218   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5219 
5220   MVT ContainerVT = VecVT;
5221   if (VecVT.isFixedLengthVector()) {
5222     ContainerVT = getContainerForFixedLengthVector(VecVT);
5223     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5224   }
5225 
5226   SDValue VL = Op.getOperand(3);
5227   SDValue Mask = Op.getOperand(2);
5228 
5229   MVT M1VT = getLMUL1VT(ContainerVT);
5230   MVT XLenVT = Subtarget.getXLenVT();
5231   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5232 
5233   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5234                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5235                                         DL, DAG, Subtarget);
5236   SDValue Reduction =
5237       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5238   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5239                              DAG.getConstant(0, DL, XLenVT));
5240   if (!VecVT.isInteger())
5241     return Elt0;
5242   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5243 }
5244 
5245 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5246                                                    SelectionDAG &DAG) const {
5247   SDValue Vec = Op.getOperand(0);
5248   SDValue SubVec = Op.getOperand(1);
5249   MVT VecVT = Vec.getSimpleValueType();
5250   MVT SubVecVT = SubVec.getSimpleValueType();
5251 
5252   SDLoc DL(Op);
5253   MVT XLenVT = Subtarget.getXLenVT();
5254   unsigned OrigIdx = Op.getConstantOperandVal(2);
5255   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5256 
5257   // We don't have the ability to slide mask vectors up indexed by their i1
5258   // elements; the smallest we can do is i8. Often we are able to bitcast to
5259   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5260   // into a scalable one, we might not necessarily have enough scalable
5261   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5262   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5263       (OrigIdx != 0 || !Vec.isUndef())) {
5264     if (VecVT.getVectorMinNumElements() >= 8 &&
5265         SubVecVT.getVectorMinNumElements() >= 8) {
5266       assert(OrigIdx % 8 == 0 && "Invalid index");
5267       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5268              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5269              "Unexpected mask vector lowering");
5270       OrigIdx /= 8;
5271       SubVecVT =
5272           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5273                            SubVecVT.isScalableVector());
5274       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5275                                VecVT.isScalableVector());
5276       Vec = DAG.getBitcast(VecVT, Vec);
5277       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5278     } else {
5279       // We can't slide this mask vector up indexed by its i1 elements.
5280       // This poses a problem when we wish to insert a scalable vector which
5281       // can't be re-expressed as a larger type. Just choose the slow path and
5282       // extend to a larger type, then truncate back down.
5283       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5284       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5285       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5286       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5287       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5288                         Op.getOperand(2));
5289       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5290       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5291     }
5292   }
5293 
5294   // If the subvector vector is a fixed-length type, we cannot use subregister
5295   // manipulation to simplify the codegen; we don't know which register of a
5296   // LMUL group contains the specific subvector as we only know the minimum
5297   // register size. Therefore we must slide the vector group up the full
5298   // amount.
5299   if (SubVecVT.isFixedLengthVector()) {
5300     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5301       return Op;
5302     MVT ContainerVT = VecVT;
5303     if (VecVT.isFixedLengthVector()) {
5304       ContainerVT = getContainerForFixedLengthVector(VecVT);
5305       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5306     }
5307     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5308                          DAG.getUNDEF(ContainerVT), SubVec,
5309                          DAG.getConstant(0, DL, XLenVT));
5310     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5311       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5312       return DAG.getBitcast(Op.getValueType(), SubVec);
5313     }
5314     SDValue Mask =
5315         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5316     // Set the vector length to only the number of elements we care about. Note
5317     // that for slideup this includes the offset.
5318     SDValue VL =
5319         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5320     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5321     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5322                                   SubVec, SlideupAmt, Mask, VL);
5323     if (VecVT.isFixedLengthVector())
5324       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5325     return DAG.getBitcast(Op.getValueType(), Slideup);
5326   }
5327 
5328   unsigned SubRegIdx, RemIdx;
5329   std::tie(SubRegIdx, RemIdx) =
5330       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5331           VecVT, SubVecVT, OrigIdx, TRI);
5332 
5333   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5334   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5335                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5336                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5337 
5338   // 1. If the Idx has been completely eliminated and this subvector's size is
5339   // a vector register or a multiple thereof, or the surrounding elements are
5340   // undef, then this is a subvector insert which naturally aligns to a vector
5341   // register. These can easily be handled using subregister manipulation.
5342   // 2. If the subvector is smaller than a vector register, then the insertion
5343   // must preserve the undisturbed elements of the register. We do this by
5344   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5345   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5346   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5347   // LMUL=1 type back into the larger vector (resolving to another subregister
5348   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5349   // to avoid allocating a large register group to hold our subvector.
5350   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5351     return Op;
5352 
5353   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5354   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5355   // (in our case undisturbed). This means we can set up a subvector insertion
5356   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5357   // size of the subvector.
5358   MVT InterSubVT = VecVT;
5359   SDValue AlignedExtract = Vec;
5360   unsigned AlignedIdx = OrigIdx - RemIdx;
5361   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5362     InterSubVT = getLMUL1VT(VecVT);
5363     // Extract a subvector equal to the nearest full vector register type. This
5364     // should resolve to a EXTRACT_SUBREG instruction.
5365     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5366                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5367   }
5368 
5369   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5370   // For scalable vectors this must be further multiplied by vscale.
5371   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5372 
5373   SDValue Mask, VL;
5374   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5375 
5376   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5377   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5378   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5379   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5380 
5381   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5382                        DAG.getUNDEF(InterSubVT), SubVec,
5383                        DAG.getConstant(0, DL, XLenVT));
5384 
5385   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5386                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5387 
5388   // If required, insert this subvector back into the correct vector register.
5389   // This should resolve to an INSERT_SUBREG instruction.
5390   if (VecVT.bitsGT(InterSubVT))
5391     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5392                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5393 
5394   // We might have bitcast from a mask type: cast back to the original type if
5395   // required.
5396   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5397 }
5398 
5399 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5400                                                     SelectionDAG &DAG) const {
5401   SDValue Vec = Op.getOperand(0);
5402   MVT SubVecVT = Op.getSimpleValueType();
5403   MVT VecVT = Vec.getSimpleValueType();
5404 
5405   SDLoc DL(Op);
5406   MVT XLenVT = Subtarget.getXLenVT();
5407   unsigned OrigIdx = Op.getConstantOperandVal(1);
5408   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5409 
5410   // We don't have the ability to slide mask vectors down indexed by their i1
5411   // elements; the smallest we can do is i8. Often we are able to bitcast to
5412   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5413   // from a scalable one, we might not necessarily have enough scalable
5414   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5415   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5416     if (VecVT.getVectorMinNumElements() >= 8 &&
5417         SubVecVT.getVectorMinNumElements() >= 8) {
5418       assert(OrigIdx % 8 == 0 && "Invalid index");
5419       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5420              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5421              "Unexpected mask vector lowering");
5422       OrigIdx /= 8;
5423       SubVecVT =
5424           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5425                            SubVecVT.isScalableVector());
5426       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5427                                VecVT.isScalableVector());
5428       Vec = DAG.getBitcast(VecVT, Vec);
5429     } else {
5430       // We can't slide this mask vector down, indexed by its i1 elements.
5431       // This poses a problem when we wish to extract a scalable vector which
5432       // can't be re-expressed as a larger type. Just choose the slow path and
5433       // extend to a larger type, then truncate back down.
5434       // TODO: We could probably improve this when extracting certain fixed
5435       // from fixed, where we can extract as i8 and shift the correct element
5436       // right to reach the desired subvector?
5437       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5438       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5439       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5440       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5441                         Op.getOperand(1));
5442       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5443       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5444     }
5445   }
5446 
5447   // If the subvector vector is a fixed-length type, we cannot use subregister
5448   // manipulation to simplify the codegen; we don't know which register of a
5449   // LMUL group contains the specific subvector as we only know the minimum
5450   // register size. Therefore we must slide the vector group down the full
5451   // amount.
5452   if (SubVecVT.isFixedLengthVector()) {
5453     // With an index of 0 this is a cast-like subvector, which can be performed
5454     // with subregister operations.
5455     if (OrigIdx == 0)
5456       return Op;
5457     MVT ContainerVT = VecVT;
5458     if (VecVT.isFixedLengthVector()) {
5459       ContainerVT = getContainerForFixedLengthVector(VecVT);
5460       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5461     }
5462     SDValue Mask =
5463         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5464     // Set the vector length to only the number of elements we care about. This
5465     // avoids sliding down elements we're going to discard straight away.
5466     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5467     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5468     SDValue Slidedown =
5469         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5470                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5471     // Now we can use a cast-like subvector extract to get the result.
5472     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5473                             DAG.getConstant(0, DL, XLenVT));
5474     return DAG.getBitcast(Op.getValueType(), Slidedown);
5475   }
5476 
5477   unsigned SubRegIdx, RemIdx;
5478   std::tie(SubRegIdx, RemIdx) =
5479       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5480           VecVT, SubVecVT, OrigIdx, TRI);
5481 
5482   // If the Idx has been completely eliminated then this is a subvector extract
5483   // which naturally aligns to a vector register. These can easily be handled
5484   // using subregister manipulation.
5485   if (RemIdx == 0)
5486     return Op;
5487 
5488   // Else we must shift our vector register directly to extract the subvector.
5489   // Do this using VSLIDEDOWN.
5490 
5491   // If the vector type is an LMUL-group type, extract a subvector equal to the
5492   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5493   // instruction.
5494   MVT InterSubVT = VecVT;
5495   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5496     InterSubVT = getLMUL1VT(VecVT);
5497     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5498                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5499   }
5500 
5501   // Slide this vector register down by the desired number of elements in order
5502   // to place the desired subvector starting at element 0.
5503   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5504   // For scalable vectors this must be further multiplied by vscale.
5505   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5506 
5507   SDValue Mask, VL;
5508   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5509   SDValue Slidedown =
5510       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5511                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5512 
5513   // Now the vector is in the right position, extract our final subvector. This
5514   // should resolve to a COPY.
5515   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5516                           DAG.getConstant(0, DL, XLenVT));
5517 
5518   // We might have bitcast from a mask type: cast back to the original type if
5519   // required.
5520   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5521 }
5522 
5523 // Lower step_vector to the vid instruction. Any non-identity step value must
5524 // be accounted for my manual expansion.
5525 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5526                                               SelectionDAG &DAG) const {
5527   SDLoc DL(Op);
5528   MVT VT = Op.getSimpleValueType();
5529   MVT XLenVT = Subtarget.getXLenVT();
5530   SDValue Mask, VL;
5531   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5532   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5533   uint64_t StepValImm = Op.getConstantOperandVal(0);
5534   if (StepValImm != 1) {
5535     if (isPowerOf2_64(StepValImm)) {
5536       SDValue StepVal =
5537           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5538                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5539       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5540     } else {
5541       SDValue StepVal = lowerScalarSplat(
5542           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5543           VL, VT, DL, DAG, Subtarget);
5544       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5545     }
5546   }
5547   return StepVec;
5548 }
5549 
5550 // Implement vector_reverse using vrgather.vv with indices determined by
5551 // subtracting the id of each element from (VLMAX-1). This will convert
5552 // the indices like so:
5553 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5554 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5555 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5556                                                  SelectionDAG &DAG) const {
5557   SDLoc DL(Op);
5558   MVT VecVT = Op.getSimpleValueType();
5559   unsigned EltSize = VecVT.getScalarSizeInBits();
5560   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5561 
5562   unsigned MaxVLMAX = 0;
5563   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5564   if (VectorBitsMax != 0)
5565     MaxVLMAX =
5566         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5567 
5568   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5569   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5570 
5571   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5572   // to use vrgatherei16.vv.
5573   // TODO: It's also possible to use vrgatherei16.vv for other types to
5574   // decrease register width for the index calculation.
5575   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5576     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5577     // Reverse each half, then reassemble them in reverse order.
5578     // NOTE: It's also possible that after splitting that VLMAX no longer
5579     // requires vrgatherei16.vv.
5580     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5581       SDValue Lo, Hi;
5582       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5583       EVT LoVT, HiVT;
5584       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5585       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5586       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5587       // Reassemble the low and high pieces reversed.
5588       // FIXME: This is a CONCAT_VECTORS.
5589       SDValue Res =
5590           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5591                       DAG.getIntPtrConstant(0, DL));
5592       return DAG.getNode(
5593           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5594           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5595     }
5596 
5597     // Just promote the int type to i16 which will double the LMUL.
5598     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5599     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5600   }
5601 
5602   MVT XLenVT = Subtarget.getXLenVT();
5603   SDValue Mask, VL;
5604   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5605 
5606   // Calculate VLMAX-1 for the desired SEW.
5607   unsigned MinElts = VecVT.getVectorMinNumElements();
5608   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5609                               DAG.getConstant(MinElts, DL, XLenVT));
5610   SDValue VLMinus1 =
5611       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5612 
5613   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5614   bool IsRV32E64 =
5615       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5616   SDValue SplatVL;
5617   if (!IsRV32E64)
5618     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5619   else
5620     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5621                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5622 
5623   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5624   SDValue Indices =
5625       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5626 
5627   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5628 }
5629 
5630 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5631                                                 SelectionDAG &DAG) const {
5632   SDLoc DL(Op);
5633   SDValue V1 = Op.getOperand(0);
5634   SDValue V2 = Op.getOperand(1);
5635   MVT XLenVT = Subtarget.getXLenVT();
5636   MVT VecVT = Op.getSimpleValueType();
5637 
5638   unsigned MinElts = VecVT.getVectorMinNumElements();
5639   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5640                               DAG.getConstant(MinElts, DL, XLenVT));
5641 
5642   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5643   SDValue DownOffset, UpOffset;
5644   if (ImmValue >= 0) {
5645     // The operand is a TargetConstant, we need to rebuild it as a regular
5646     // constant.
5647     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5648     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5649   } else {
5650     // The operand is a TargetConstant, we need to rebuild it as a regular
5651     // constant rather than negating the original operand.
5652     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5653     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5654   }
5655 
5656   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5657 
5658   SDValue SlideDown =
5659       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5660                   DownOffset, TrueMask, UpOffset);
5661   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5662                      TrueMask,
5663                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5664 }
5665 
5666 SDValue
5667 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5668                                                      SelectionDAG &DAG) const {
5669   SDLoc DL(Op);
5670   auto *Load = cast<LoadSDNode>(Op);
5671 
5672   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5673                                         Load->getMemoryVT(),
5674                                         *Load->getMemOperand()) &&
5675          "Expecting a correctly-aligned load");
5676 
5677   MVT VT = Op.getSimpleValueType();
5678   MVT XLenVT = Subtarget.getXLenVT();
5679   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5680 
5681   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5682 
5683   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5684   SDValue IntID = DAG.getTargetConstant(
5685       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5686   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5687   if (!IsMaskOp)
5688     Ops.push_back(DAG.getUNDEF(ContainerVT));
5689   Ops.push_back(Load->getBasePtr());
5690   Ops.push_back(VL);
5691   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5692   SDValue NewLoad =
5693       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5694                               Load->getMemoryVT(), Load->getMemOperand());
5695 
5696   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5697   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5698 }
5699 
5700 SDValue
5701 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5702                                                       SelectionDAG &DAG) const {
5703   SDLoc DL(Op);
5704   auto *Store = cast<StoreSDNode>(Op);
5705 
5706   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5707                                         Store->getMemoryVT(),
5708                                         *Store->getMemOperand()) &&
5709          "Expecting a correctly-aligned store");
5710 
5711   SDValue StoreVal = Store->getValue();
5712   MVT VT = StoreVal.getSimpleValueType();
5713   MVT XLenVT = Subtarget.getXLenVT();
5714 
5715   // If the size less than a byte, we need to pad with zeros to make a byte.
5716   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5717     VT = MVT::v8i1;
5718     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5719                            DAG.getConstant(0, DL, VT), StoreVal,
5720                            DAG.getIntPtrConstant(0, DL));
5721   }
5722 
5723   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5724 
5725   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5726 
5727   SDValue NewValue =
5728       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5729 
5730   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5731   SDValue IntID = DAG.getTargetConstant(
5732       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5733   return DAG.getMemIntrinsicNode(
5734       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5735       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5736       Store->getMemoryVT(), Store->getMemOperand());
5737 }
5738 
5739 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5740                                              SelectionDAG &DAG) const {
5741   SDLoc DL(Op);
5742   MVT VT = Op.getSimpleValueType();
5743 
5744   const auto *MemSD = cast<MemSDNode>(Op);
5745   EVT MemVT = MemSD->getMemoryVT();
5746   MachineMemOperand *MMO = MemSD->getMemOperand();
5747   SDValue Chain = MemSD->getChain();
5748   SDValue BasePtr = MemSD->getBasePtr();
5749 
5750   SDValue Mask, PassThru, VL;
5751   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5752     Mask = VPLoad->getMask();
5753     PassThru = DAG.getUNDEF(VT);
5754     VL = VPLoad->getVectorLength();
5755   } else {
5756     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5757     Mask = MLoad->getMask();
5758     PassThru = MLoad->getPassThru();
5759   }
5760 
5761   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5762 
5763   MVT XLenVT = Subtarget.getXLenVT();
5764 
5765   MVT ContainerVT = VT;
5766   if (VT.isFixedLengthVector()) {
5767     ContainerVT = getContainerForFixedLengthVector(VT);
5768     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5769     if (!IsUnmasked) {
5770       MVT MaskVT = getMaskTypeFor(ContainerVT);
5771       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5772     }
5773   }
5774 
5775   if (!VL)
5776     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5777 
5778   unsigned IntID =
5779       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5780   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5781   if (IsUnmasked)
5782     Ops.push_back(DAG.getUNDEF(ContainerVT));
5783   else
5784     Ops.push_back(PassThru);
5785   Ops.push_back(BasePtr);
5786   if (!IsUnmasked)
5787     Ops.push_back(Mask);
5788   Ops.push_back(VL);
5789   if (!IsUnmasked)
5790     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5791 
5792   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5793 
5794   SDValue Result =
5795       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5796   Chain = Result.getValue(1);
5797 
5798   if (VT.isFixedLengthVector())
5799     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5800 
5801   return DAG.getMergeValues({Result, Chain}, DL);
5802 }
5803 
5804 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5805                                               SelectionDAG &DAG) const {
5806   SDLoc DL(Op);
5807 
5808   const auto *MemSD = cast<MemSDNode>(Op);
5809   EVT MemVT = MemSD->getMemoryVT();
5810   MachineMemOperand *MMO = MemSD->getMemOperand();
5811   SDValue Chain = MemSD->getChain();
5812   SDValue BasePtr = MemSD->getBasePtr();
5813   SDValue Val, Mask, VL;
5814 
5815   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5816     Val = VPStore->getValue();
5817     Mask = VPStore->getMask();
5818     VL = VPStore->getVectorLength();
5819   } else {
5820     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5821     Val = MStore->getValue();
5822     Mask = MStore->getMask();
5823   }
5824 
5825   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5826 
5827   MVT VT = Val.getSimpleValueType();
5828   MVT XLenVT = Subtarget.getXLenVT();
5829 
5830   MVT ContainerVT = VT;
5831   if (VT.isFixedLengthVector()) {
5832     ContainerVT = getContainerForFixedLengthVector(VT);
5833 
5834     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5835     if (!IsUnmasked) {
5836       MVT MaskVT = getMaskTypeFor(ContainerVT);
5837       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5838     }
5839   }
5840 
5841   if (!VL)
5842     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5843 
5844   unsigned IntID =
5845       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5846   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5847   Ops.push_back(Val);
5848   Ops.push_back(BasePtr);
5849   if (!IsUnmasked)
5850     Ops.push_back(Mask);
5851   Ops.push_back(VL);
5852 
5853   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5854                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5855 }
5856 
5857 SDValue
5858 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5859                                                       SelectionDAG &DAG) const {
5860   MVT InVT = Op.getOperand(0).getSimpleValueType();
5861   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5862 
5863   MVT VT = Op.getSimpleValueType();
5864 
5865   SDValue Op1 =
5866       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5867   SDValue Op2 =
5868       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5869 
5870   SDLoc DL(Op);
5871   SDValue VL =
5872       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5873 
5874   MVT MaskVT = getMaskTypeFor(ContainerVT);
5875   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5876 
5877   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5878                             Op.getOperand(2), Mask, VL);
5879 
5880   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5881 }
5882 
5883 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5884     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5885   MVT VT = Op.getSimpleValueType();
5886 
5887   if (VT.getVectorElementType() == MVT::i1)
5888     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5889 
5890   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5891 }
5892 
5893 SDValue
5894 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5895                                                       SelectionDAG &DAG) const {
5896   unsigned Opc;
5897   switch (Op.getOpcode()) {
5898   default: llvm_unreachable("Unexpected opcode!");
5899   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5900   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5901   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5902   }
5903 
5904   return lowerToScalableOp(Op, DAG, Opc);
5905 }
5906 
5907 // Lower vector ABS to smax(X, sub(0, X)).
5908 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5909   SDLoc DL(Op);
5910   MVT VT = Op.getSimpleValueType();
5911   SDValue X = Op.getOperand(0);
5912 
5913   assert(VT.isFixedLengthVector() && "Unexpected type");
5914 
5915   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5916   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5917 
5918   SDValue Mask, VL;
5919   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5920 
5921   SDValue SplatZero = DAG.getNode(
5922       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5923       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5924   SDValue NegX =
5925       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5926   SDValue Max =
5927       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5928 
5929   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5930 }
5931 
5932 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5933     SDValue Op, SelectionDAG &DAG) const {
5934   SDLoc DL(Op);
5935   MVT VT = Op.getSimpleValueType();
5936   SDValue Mag = Op.getOperand(0);
5937   SDValue Sign = Op.getOperand(1);
5938   assert(Mag.getValueType() == Sign.getValueType() &&
5939          "Can only handle COPYSIGN with matching types.");
5940 
5941   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5942   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5943   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5944 
5945   SDValue Mask, VL;
5946   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5947 
5948   SDValue CopySign =
5949       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5950 
5951   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5952 }
5953 
5954 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5955     SDValue Op, SelectionDAG &DAG) const {
5956   MVT VT = Op.getSimpleValueType();
5957   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5958 
5959   MVT I1ContainerVT =
5960       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5961 
5962   SDValue CC =
5963       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5964   SDValue Op1 =
5965       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5966   SDValue Op2 =
5967       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5968 
5969   SDLoc DL(Op);
5970   SDValue Mask, VL;
5971   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5972 
5973   SDValue Select =
5974       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5975 
5976   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5977 }
5978 
5979 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5980                                                unsigned NewOpc,
5981                                                bool HasMask) const {
5982   MVT VT = Op.getSimpleValueType();
5983   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5984 
5985   // Create list of operands by converting existing ones to scalable types.
5986   SmallVector<SDValue, 6> Ops;
5987   for (const SDValue &V : Op->op_values()) {
5988     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5989 
5990     // Pass through non-vector operands.
5991     if (!V.getValueType().isVector()) {
5992       Ops.push_back(V);
5993       continue;
5994     }
5995 
5996     // "cast" fixed length vector to a scalable vector.
5997     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5998            "Only fixed length vectors are supported!");
5999     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6000   }
6001 
6002   SDLoc DL(Op);
6003   SDValue Mask, VL;
6004   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6005   if (HasMask)
6006     Ops.push_back(Mask);
6007   Ops.push_back(VL);
6008 
6009   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6010   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6011 }
6012 
6013 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6014 // * Operands of each node are assumed to be in the same order.
6015 // * The EVL operand is promoted from i32 to i64 on RV64.
6016 // * Fixed-length vectors are converted to their scalable-vector container
6017 //   types.
6018 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6019                                        unsigned RISCVISDOpc) const {
6020   SDLoc DL(Op);
6021   MVT VT = Op.getSimpleValueType();
6022   SmallVector<SDValue, 4> Ops;
6023 
6024   for (const auto &OpIdx : enumerate(Op->ops())) {
6025     SDValue V = OpIdx.value();
6026     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6027     // Pass through operands which aren't fixed-length vectors.
6028     if (!V.getValueType().isFixedLengthVector()) {
6029       Ops.push_back(V);
6030       continue;
6031     }
6032     // "cast" fixed length vector to a scalable vector.
6033     MVT OpVT = V.getSimpleValueType();
6034     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6035     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6036            "Only fixed length vectors are supported!");
6037     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6038   }
6039 
6040   if (!VT.isFixedLengthVector())
6041     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6042 
6043   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6044 
6045   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6046 
6047   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6048 }
6049 
6050 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6051                                               SelectionDAG &DAG) const {
6052   SDLoc DL(Op);
6053   MVT VT = Op.getSimpleValueType();
6054 
6055   SDValue Src = Op.getOperand(0);
6056   // NOTE: Mask is dropped.
6057   SDValue VL = Op.getOperand(2);
6058 
6059   MVT ContainerVT = VT;
6060   if (VT.isFixedLengthVector()) {
6061     ContainerVT = getContainerForFixedLengthVector(VT);
6062     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6063     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6064   }
6065 
6066   MVT XLenVT = Subtarget.getXLenVT();
6067   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6068   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6069                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6070 
6071   SDValue SplatValue = DAG.getConstant(
6072       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
6073   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6074                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6075 
6076   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6077                                Splat, ZeroSplat, VL);
6078   if (!VT.isFixedLengthVector())
6079     return Result;
6080   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6081 }
6082 
6083 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6084                                                 SelectionDAG &DAG) const {
6085   SDLoc DL(Op);
6086   MVT VT = Op.getSimpleValueType();
6087 
6088   SDValue Op1 = Op.getOperand(0);
6089   SDValue Op2 = Op.getOperand(1);
6090   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6091   // NOTE: Mask is dropped.
6092   SDValue VL = Op.getOperand(4);
6093 
6094   MVT ContainerVT = VT;
6095   if (VT.isFixedLengthVector()) {
6096     ContainerVT = getContainerForFixedLengthVector(VT);
6097     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6098     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6099   }
6100 
6101   SDValue Result;
6102   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
6103 
6104   switch (Condition) {
6105   default:
6106     break;
6107   // X != Y  --> (X^Y)
6108   case ISD::SETNE:
6109     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6110     break;
6111   // X == Y  --> ~(X^Y)
6112   case ISD::SETEQ: {
6113     SDValue Temp =
6114         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6115     Result =
6116         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6117     break;
6118   }
6119   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6120   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6121   case ISD::SETGT:
6122   case ISD::SETULT: {
6123     SDValue Temp =
6124         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6125     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6126     break;
6127   }
6128   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6129   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6130   case ISD::SETLT:
6131   case ISD::SETUGT: {
6132     SDValue Temp =
6133         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6134     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6135     break;
6136   }
6137   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6138   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6139   case ISD::SETGE:
6140   case ISD::SETULE: {
6141     SDValue Temp =
6142         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6143     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6144     break;
6145   }
6146   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6147   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6148   case ISD::SETLE:
6149   case ISD::SETUGE: {
6150     SDValue Temp =
6151         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6152     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6153     break;
6154   }
6155   }
6156 
6157   if (!VT.isFixedLengthVector())
6158     return Result;
6159   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6160 }
6161 
6162 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6163 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6164                                                 unsigned RISCVISDOpc) const {
6165   SDLoc DL(Op);
6166 
6167   SDValue Src = Op.getOperand(0);
6168   SDValue Mask = Op.getOperand(1);
6169   SDValue VL = Op.getOperand(2);
6170 
6171   MVT DstVT = Op.getSimpleValueType();
6172   MVT SrcVT = Src.getSimpleValueType();
6173   if (DstVT.isFixedLengthVector()) {
6174     DstVT = getContainerForFixedLengthVector(DstVT);
6175     SrcVT = getContainerForFixedLengthVector(SrcVT);
6176     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6177     MVT MaskVT = getMaskTypeFor(DstVT);
6178     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6179   }
6180 
6181   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6182                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6183                                 ? RISCVISD::VSEXT_VL
6184                                 : RISCVISD::VZEXT_VL;
6185 
6186   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6187   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6188 
6189   SDValue Result;
6190   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6191     if (SrcVT.isInteger()) {
6192       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6193 
6194       // Do we need to do any pre-widening before converting?
6195       if (SrcEltSize == 1) {
6196         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6197         MVT XLenVT = Subtarget.getXLenVT();
6198         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6199         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6200                                         DAG.getUNDEF(IntVT), Zero, VL);
6201         SDValue One = DAG.getConstant(
6202             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6203         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6204                                        DAG.getUNDEF(IntVT), One, VL);
6205         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6206                           ZeroSplat, VL);
6207       } else if (DstEltSize > (2 * SrcEltSize)) {
6208         // Widen before converting.
6209         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6210                                      DstVT.getVectorElementCount());
6211         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6212       }
6213 
6214       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6215     } else {
6216       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6217              "Wrong input/output vector types");
6218 
6219       // Convert f16 to f32 then convert f32 to i64.
6220       if (DstEltSize > (2 * SrcEltSize)) {
6221         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6222         MVT InterimFVT =
6223             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6224         Src =
6225             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6226       }
6227 
6228       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6229     }
6230   } else { // Narrowing + Conversion
6231     if (SrcVT.isInteger()) {
6232       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6233       // First do a narrowing convert to an FP type half the size, then round
6234       // the FP type to a small FP type if needed.
6235 
6236       MVT InterimFVT = DstVT;
6237       if (SrcEltSize > (2 * DstEltSize)) {
6238         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6239         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6240         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6241       }
6242 
6243       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6244 
6245       if (InterimFVT != DstVT) {
6246         Src = Result;
6247         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6248       }
6249     } else {
6250       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6251              "Wrong input/output vector types");
6252       // First do a narrowing conversion to an integer half the size, then
6253       // truncate if needed.
6254 
6255       if (DstEltSize == 1) {
6256         // First convert to the same size integer, then convert to mask using
6257         // setcc.
6258         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6259         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6260                                           DstVT.getVectorElementCount());
6261         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6262 
6263         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6264         // otherwise the conversion was undefined.
6265         MVT XLenVT = Subtarget.getXLenVT();
6266         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
6267         SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
6268                                 DAG.getUNDEF(InterimIVT), SplatZero);
6269         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6270                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6271       } else {
6272         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6273                                           DstVT.getVectorElementCount());
6274 
6275         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6276 
6277         while (InterimIVT != DstVT) {
6278           SrcEltSize /= 2;
6279           Src = Result;
6280           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6281                                         DstVT.getVectorElementCount());
6282           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6283                                Src, Mask, VL);
6284         }
6285       }
6286     }
6287   }
6288 
6289   MVT VT = Op.getSimpleValueType();
6290   if (!VT.isFixedLengthVector())
6291     return Result;
6292   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6293 }
6294 
6295 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6296                                             unsigned MaskOpc,
6297                                             unsigned VecOpc) const {
6298   MVT VT = Op.getSimpleValueType();
6299   if (VT.getVectorElementType() != MVT::i1)
6300     return lowerVPOp(Op, DAG, VecOpc);
6301 
6302   // It is safe to drop mask parameter as masked-off elements are undef.
6303   SDValue Op1 = Op->getOperand(0);
6304   SDValue Op2 = Op->getOperand(1);
6305   SDValue VL = Op->getOperand(3);
6306 
6307   MVT ContainerVT = VT;
6308   const bool IsFixed = VT.isFixedLengthVector();
6309   if (IsFixed) {
6310     ContainerVT = getContainerForFixedLengthVector(VT);
6311     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6312     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6313   }
6314 
6315   SDLoc DL(Op);
6316   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6317   if (!IsFixed)
6318     return Val;
6319   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6320 }
6321 
6322 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6323 // matched to a RVV indexed load. The RVV indexed load instructions only
6324 // support the "unsigned unscaled" addressing mode; indices are implicitly
6325 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6326 // signed or scaled indexing is extended to the XLEN value type and scaled
6327 // accordingly.
6328 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6329                                                SelectionDAG &DAG) const {
6330   SDLoc DL(Op);
6331   MVT VT = Op.getSimpleValueType();
6332 
6333   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6334   EVT MemVT = MemSD->getMemoryVT();
6335   MachineMemOperand *MMO = MemSD->getMemOperand();
6336   SDValue Chain = MemSD->getChain();
6337   SDValue BasePtr = MemSD->getBasePtr();
6338 
6339   ISD::LoadExtType LoadExtType;
6340   SDValue Index, Mask, PassThru, VL;
6341 
6342   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6343     Index = VPGN->getIndex();
6344     Mask = VPGN->getMask();
6345     PassThru = DAG.getUNDEF(VT);
6346     VL = VPGN->getVectorLength();
6347     // VP doesn't support extending loads.
6348     LoadExtType = ISD::NON_EXTLOAD;
6349   } else {
6350     // Else it must be a MGATHER.
6351     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6352     Index = MGN->getIndex();
6353     Mask = MGN->getMask();
6354     PassThru = MGN->getPassThru();
6355     LoadExtType = MGN->getExtensionType();
6356   }
6357 
6358   MVT IndexVT = Index.getSimpleValueType();
6359   MVT XLenVT = Subtarget.getXLenVT();
6360 
6361   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6362          "Unexpected VTs!");
6363   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6364   // Targets have to explicitly opt-in for extending vector loads.
6365   assert(LoadExtType == ISD::NON_EXTLOAD &&
6366          "Unexpected extending MGATHER/VP_GATHER");
6367   (void)LoadExtType;
6368 
6369   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6370   // the selection of the masked intrinsics doesn't do this for us.
6371   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6372 
6373   MVT ContainerVT = VT;
6374   if (VT.isFixedLengthVector()) {
6375     // We need to use the larger of the result and index type to determine the
6376     // scalable type to use so we don't increase LMUL for any operand/result.
6377     if (VT.bitsGE(IndexVT)) {
6378       ContainerVT = getContainerForFixedLengthVector(VT);
6379       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6380                                  ContainerVT.getVectorElementCount());
6381     } else {
6382       IndexVT = getContainerForFixedLengthVector(IndexVT);
6383       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6384                                      IndexVT.getVectorElementCount());
6385     }
6386 
6387     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6388 
6389     if (!IsUnmasked) {
6390       MVT MaskVT = getMaskTypeFor(ContainerVT);
6391       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6392       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6393     }
6394   }
6395 
6396   if (!VL)
6397     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6398 
6399   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6400     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6401     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6402                                    VL);
6403     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6404                         TrueMask, VL);
6405   }
6406 
6407   unsigned IntID =
6408       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6409   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6410   if (IsUnmasked)
6411     Ops.push_back(DAG.getUNDEF(ContainerVT));
6412   else
6413     Ops.push_back(PassThru);
6414   Ops.push_back(BasePtr);
6415   Ops.push_back(Index);
6416   if (!IsUnmasked)
6417     Ops.push_back(Mask);
6418   Ops.push_back(VL);
6419   if (!IsUnmasked)
6420     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6421 
6422   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6423   SDValue Result =
6424       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6425   Chain = Result.getValue(1);
6426 
6427   if (VT.isFixedLengthVector())
6428     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6429 
6430   return DAG.getMergeValues({Result, Chain}, DL);
6431 }
6432 
6433 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6434 // matched to a RVV indexed store. The RVV indexed store instructions only
6435 // support the "unsigned unscaled" addressing mode; indices are implicitly
6436 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6437 // signed or scaled indexing is extended to the XLEN value type and scaled
6438 // accordingly.
6439 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6440                                                 SelectionDAG &DAG) const {
6441   SDLoc DL(Op);
6442   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6443   EVT MemVT = MemSD->getMemoryVT();
6444   MachineMemOperand *MMO = MemSD->getMemOperand();
6445   SDValue Chain = MemSD->getChain();
6446   SDValue BasePtr = MemSD->getBasePtr();
6447 
6448   bool IsTruncatingStore = false;
6449   SDValue Index, Mask, Val, VL;
6450 
6451   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6452     Index = VPSN->getIndex();
6453     Mask = VPSN->getMask();
6454     Val = VPSN->getValue();
6455     VL = VPSN->getVectorLength();
6456     // VP doesn't support truncating stores.
6457     IsTruncatingStore = false;
6458   } else {
6459     // Else it must be a MSCATTER.
6460     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6461     Index = MSN->getIndex();
6462     Mask = MSN->getMask();
6463     Val = MSN->getValue();
6464     IsTruncatingStore = MSN->isTruncatingStore();
6465   }
6466 
6467   MVT VT = Val.getSimpleValueType();
6468   MVT IndexVT = Index.getSimpleValueType();
6469   MVT XLenVT = Subtarget.getXLenVT();
6470 
6471   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6472          "Unexpected VTs!");
6473   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6474   // Targets have to explicitly opt-in for extending vector loads and
6475   // truncating vector stores.
6476   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6477   (void)IsTruncatingStore;
6478 
6479   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6480   // the selection of the masked intrinsics doesn't do this for us.
6481   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6482 
6483   MVT ContainerVT = VT;
6484   if (VT.isFixedLengthVector()) {
6485     // We need to use the larger of the value and index type to determine the
6486     // scalable type to use so we don't increase LMUL for any operand/result.
6487     if (VT.bitsGE(IndexVT)) {
6488       ContainerVT = getContainerForFixedLengthVector(VT);
6489       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6490                                  ContainerVT.getVectorElementCount());
6491     } else {
6492       IndexVT = getContainerForFixedLengthVector(IndexVT);
6493       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6494                                      IndexVT.getVectorElementCount());
6495     }
6496 
6497     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6498     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6499 
6500     if (!IsUnmasked) {
6501       MVT MaskVT = getMaskTypeFor(ContainerVT);
6502       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6503     }
6504   }
6505 
6506   if (!VL)
6507     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6508 
6509   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6510     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6511     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6512                                    VL);
6513     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6514                         TrueMask, VL);
6515   }
6516 
6517   unsigned IntID =
6518       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6519   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6520   Ops.push_back(Val);
6521   Ops.push_back(BasePtr);
6522   Ops.push_back(Index);
6523   if (!IsUnmasked)
6524     Ops.push_back(Mask);
6525   Ops.push_back(VL);
6526 
6527   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6528                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6529 }
6530 
6531 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6532                                                SelectionDAG &DAG) const {
6533   const MVT XLenVT = Subtarget.getXLenVT();
6534   SDLoc DL(Op);
6535   SDValue Chain = Op->getOperand(0);
6536   SDValue SysRegNo = DAG.getTargetConstant(
6537       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6538   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6539   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6540 
6541   // Encoding used for rounding mode in RISCV differs from that used in
6542   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6543   // table, which consists of a sequence of 4-bit fields, each representing
6544   // corresponding FLT_ROUNDS mode.
6545   static const int Table =
6546       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6547       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6548       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6549       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6550       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6551 
6552   SDValue Shift =
6553       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6554   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6555                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6556   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6557                                DAG.getConstant(7, DL, XLenVT));
6558 
6559   return DAG.getMergeValues({Masked, Chain}, DL);
6560 }
6561 
6562 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6563                                                SelectionDAG &DAG) const {
6564   const MVT XLenVT = Subtarget.getXLenVT();
6565   SDLoc DL(Op);
6566   SDValue Chain = Op->getOperand(0);
6567   SDValue RMValue = Op->getOperand(1);
6568   SDValue SysRegNo = DAG.getTargetConstant(
6569       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6570 
6571   // Encoding used for rounding mode in RISCV differs from that used in
6572   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6573   // a table, which consists of a sequence of 4-bit fields, each representing
6574   // corresponding RISCV mode.
6575   static const unsigned Table =
6576       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6577       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6578       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6579       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6580       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6581 
6582   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6583                               DAG.getConstant(2, DL, XLenVT));
6584   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6585                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6586   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6587                         DAG.getConstant(0x7, DL, XLenVT));
6588   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6589                      RMValue);
6590 }
6591 
6592 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6593   switch (IntNo) {
6594   default:
6595     llvm_unreachable("Unexpected Intrinsic");
6596   case Intrinsic::riscv_bcompress:
6597     return RISCVISD::BCOMPRESSW;
6598   case Intrinsic::riscv_bdecompress:
6599     return RISCVISD::BDECOMPRESSW;
6600   case Intrinsic::riscv_bfp:
6601     return RISCVISD::BFPW;
6602   case Intrinsic::riscv_fsl:
6603     return RISCVISD::FSLW;
6604   case Intrinsic::riscv_fsr:
6605     return RISCVISD::FSRW;
6606   }
6607 }
6608 
6609 // Converts the given intrinsic to a i64 operation with any extension.
6610 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6611                                          unsigned IntNo) {
6612   SDLoc DL(N);
6613   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6614   // Deal with the Instruction Operands
6615   SmallVector<SDValue, 3> NewOps;
6616   for (SDValue Op : drop_begin(N->ops()))
6617     // Promote the operand to i64 type
6618     NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
6619   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
6620   // ReplaceNodeResults requires we maintain the same type for the return value.
6621   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6622 }
6623 
6624 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6625 // form of the given Opcode.
6626 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6627   switch (Opcode) {
6628   default:
6629     llvm_unreachable("Unexpected opcode");
6630   case ISD::SHL:
6631     return RISCVISD::SLLW;
6632   case ISD::SRA:
6633     return RISCVISD::SRAW;
6634   case ISD::SRL:
6635     return RISCVISD::SRLW;
6636   case ISD::SDIV:
6637     return RISCVISD::DIVW;
6638   case ISD::UDIV:
6639     return RISCVISD::DIVUW;
6640   case ISD::UREM:
6641     return RISCVISD::REMUW;
6642   case ISD::ROTL:
6643     return RISCVISD::ROLW;
6644   case ISD::ROTR:
6645     return RISCVISD::RORW;
6646   }
6647 }
6648 
6649 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6650 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6651 // otherwise be promoted to i64, making it difficult to select the
6652 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6653 // type i8/i16/i32 is lost.
6654 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6655                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6656   SDLoc DL(N);
6657   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6658   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6659   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6660   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6661   // ReplaceNodeResults requires we maintain the same type for the return value.
6662   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6663 }
6664 
6665 // Converts the given 32-bit operation to a i64 operation with signed extension
6666 // semantic to reduce the signed extension instructions.
6667 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6668   SDLoc DL(N);
6669   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6670   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6671   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6672   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6673                                DAG.getValueType(MVT::i32));
6674   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6675 }
6676 
6677 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6678                                              SmallVectorImpl<SDValue> &Results,
6679                                              SelectionDAG &DAG) const {
6680   SDLoc DL(N);
6681   switch (N->getOpcode()) {
6682   default:
6683     llvm_unreachable("Don't know how to custom type legalize this operation!");
6684   case ISD::STRICT_FP_TO_SINT:
6685   case ISD::STRICT_FP_TO_UINT:
6686   case ISD::FP_TO_SINT:
6687   case ISD::FP_TO_UINT: {
6688     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6689            "Unexpected custom legalisation");
6690     bool IsStrict = N->isStrictFPOpcode();
6691     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6692                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6693     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6694     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6695         TargetLowering::TypeSoftenFloat) {
6696       if (!isTypeLegal(Op0.getValueType()))
6697         return;
6698       if (IsStrict) {
6699         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6700                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6701         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6702         SDValue Res = DAG.getNode(
6703             Opc, DL, VTs, N->getOperand(0), Op0,
6704             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6705         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6706         Results.push_back(Res.getValue(1));
6707         return;
6708       }
6709       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6710       SDValue Res =
6711           DAG.getNode(Opc, DL, MVT::i64, Op0,
6712                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6713       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6714       return;
6715     }
6716     // If the FP type needs to be softened, emit a library call using the 'si'
6717     // version. If we left it to default legalization we'd end up with 'di'. If
6718     // the FP type doesn't need to be softened just let generic type
6719     // legalization promote the result type.
6720     RTLIB::Libcall LC;
6721     if (IsSigned)
6722       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6723     else
6724       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6725     MakeLibCallOptions CallOptions;
6726     EVT OpVT = Op0.getValueType();
6727     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6728     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6729     SDValue Result;
6730     std::tie(Result, Chain) =
6731         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6732     Results.push_back(Result);
6733     if (IsStrict)
6734       Results.push_back(Chain);
6735     break;
6736   }
6737   case ISD::READCYCLECOUNTER: {
6738     assert(!Subtarget.is64Bit() &&
6739            "READCYCLECOUNTER only has custom type legalization on riscv32");
6740 
6741     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6742     SDValue RCW =
6743         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6744 
6745     Results.push_back(
6746         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6747     Results.push_back(RCW.getValue(2));
6748     break;
6749   }
6750   case ISD::MUL: {
6751     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6752     unsigned XLen = Subtarget.getXLen();
6753     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6754     if (Size > XLen) {
6755       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6756       SDValue LHS = N->getOperand(0);
6757       SDValue RHS = N->getOperand(1);
6758       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6759 
6760       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6761       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6762       // We need exactly one side to be unsigned.
6763       if (LHSIsU == RHSIsU)
6764         return;
6765 
6766       auto MakeMULPair = [&](SDValue S, SDValue U) {
6767         MVT XLenVT = Subtarget.getXLenVT();
6768         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6769         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6770         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6771         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6772         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6773       };
6774 
6775       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6776       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6777 
6778       // The other operand should be signed, but still prefer MULH when
6779       // possible.
6780       if (RHSIsU && LHSIsS && !RHSIsS)
6781         Results.push_back(MakeMULPair(LHS, RHS));
6782       else if (LHSIsU && RHSIsS && !LHSIsS)
6783         Results.push_back(MakeMULPair(RHS, LHS));
6784 
6785       return;
6786     }
6787     LLVM_FALLTHROUGH;
6788   }
6789   case ISD::ADD:
6790   case ISD::SUB:
6791     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6792            "Unexpected custom legalisation");
6793     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6794     break;
6795   case ISD::SHL:
6796   case ISD::SRA:
6797   case ISD::SRL:
6798     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6799            "Unexpected custom legalisation");
6800     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6801       // If we can use a BSET instruction, allow default promotion to apply.
6802       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
6803           isOneConstant(N->getOperand(0)))
6804         break;
6805       Results.push_back(customLegalizeToWOp(N, DAG));
6806       break;
6807     }
6808 
6809     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6810     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6811     // shift amount.
6812     if (N->getOpcode() == ISD::SHL) {
6813       SDLoc DL(N);
6814       SDValue NewOp0 =
6815           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6816       SDValue NewOp1 =
6817           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6818       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6819       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6820                                    DAG.getValueType(MVT::i32));
6821       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6822     }
6823 
6824     break;
6825   case ISD::ROTL:
6826   case ISD::ROTR:
6827     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6828            "Unexpected custom legalisation");
6829     Results.push_back(customLegalizeToWOp(N, DAG));
6830     break;
6831   case ISD::CTTZ:
6832   case ISD::CTTZ_ZERO_UNDEF:
6833   case ISD::CTLZ:
6834   case ISD::CTLZ_ZERO_UNDEF: {
6835     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6836            "Unexpected custom legalisation");
6837 
6838     SDValue NewOp0 =
6839         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6840     bool IsCTZ =
6841         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6842     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6843     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6844     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6845     return;
6846   }
6847   case ISD::SDIV:
6848   case ISD::UDIV:
6849   case ISD::UREM: {
6850     MVT VT = N->getSimpleValueType(0);
6851     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6852            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6853            "Unexpected custom legalisation");
6854     // Don't promote division/remainder by constant since we should expand those
6855     // to multiply by magic constant.
6856     // FIXME: What if the expansion is disabled for minsize.
6857     if (N->getOperand(1).getOpcode() == ISD::Constant)
6858       return;
6859 
6860     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6861     // the upper 32 bits. For other types we need to sign or zero extend
6862     // based on the opcode.
6863     unsigned ExtOpc = ISD::ANY_EXTEND;
6864     if (VT != MVT::i32)
6865       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6866                                            : ISD::ZERO_EXTEND;
6867 
6868     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6869     break;
6870   }
6871   case ISD::UADDO:
6872   case ISD::USUBO: {
6873     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6874            "Unexpected custom legalisation");
6875     bool IsAdd = N->getOpcode() == ISD::UADDO;
6876     // Create an ADDW or SUBW.
6877     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6878     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6879     SDValue Res =
6880         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6881     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6882                       DAG.getValueType(MVT::i32));
6883 
6884     SDValue Overflow;
6885     if (IsAdd && isOneConstant(RHS)) {
6886       // Special case uaddo X, 1 overflowed if the addition result is 0.
6887       // The general case (X + C) < C is not necessarily beneficial. Although we
6888       // reduce the live range of X, we may introduce the materialization of
6889       // constant C, especially when the setcc result is used by branch. We have
6890       // no compare with constant and branch instructions.
6891       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6892                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6893     } else {
6894       // Sign extend the LHS and perform an unsigned compare with the ADDW
6895       // result. Since the inputs are sign extended from i32, this is equivalent
6896       // to comparing the lower 32 bits.
6897       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6898       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6899                               IsAdd ? ISD::SETULT : ISD::SETUGT);
6900     }
6901 
6902     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6903     Results.push_back(Overflow);
6904     return;
6905   }
6906   case ISD::UADDSAT:
6907   case ISD::USUBSAT: {
6908     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6909            "Unexpected custom legalisation");
6910     if (Subtarget.hasStdExtZbb()) {
6911       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6912       // sign extend allows overflow of the lower 32 bits to be detected on
6913       // the promoted size.
6914       SDValue LHS =
6915           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6916       SDValue RHS =
6917           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6918       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6919       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6920       return;
6921     }
6922 
6923     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6924     // promotion for UADDO/USUBO.
6925     Results.push_back(expandAddSubSat(N, DAG));
6926     return;
6927   }
6928   case ISD::ABS: {
6929     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6930            "Unexpected custom legalisation");
6931           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6932 
6933     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6934 
6935     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6936 
6937     // Freeze the source so we can increase it's use count.
6938     Src = DAG.getFreeze(Src);
6939 
6940     // Copy sign bit to all bits using the sraiw pattern.
6941     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6942                                    DAG.getValueType(MVT::i32));
6943     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6944                            DAG.getConstant(31, DL, MVT::i64));
6945 
6946     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6947     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6948 
6949     // NOTE: The result is only required to be anyextended, but sext is
6950     // consistent with type legalization of sub.
6951     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6952                          DAG.getValueType(MVT::i32));
6953     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6954     return;
6955   }
6956   case ISD::BITCAST: {
6957     EVT VT = N->getValueType(0);
6958     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6959     SDValue Op0 = N->getOperand(0);
6960     EVT Op0VT = Op0.getValueType();
6961     MVT XLenVT = Subtarget.getXLenVT();
6962     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6963       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6964       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6965     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6966                Subtarget.hasStdExtF()) {
6967       SDValue FPConv =
6968           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6969       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6970     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6971                isTypeLegal(Op0VT)) {
6972       // Custom-legalize bitcasts from fixed-length vector types to illegal
6973       // scalar types in order to improve codegen. Bitcast the vector to a
6974       // one-element vector type whose element type is the same as the result
6975       // type, and extract the first element.
6976       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6977       if (isTypeLegal(BVT)) {
6978         SDValue BVec = DAG.getBitcast(BVT, Op0);
6979         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6980                                       DAG.getConstant(0, DL, XLenVT)));
6981       }
6982     }
6983     break;
6984   }
6985   case RISCVISD::GREV:
6986   case RISCVISD::GORC:
6987   case RISCVISD::SHFL: {
6988     MVT VT = N->getSimpleValueType(0);
6989     MVT XLenVT = Subtarget.getXLenVT();
6990     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
6991            "Unexpected custom legalisation");
6992     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6993     assert((Subtarget.hasStdExtZbp() ||
6994             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
6995              N->getConstantOperandVal(1) == 7)) &&
6996            "Unexpected extension");
6997     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6998     SDValue NewOp1 =
6999         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7000     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7001     // ReplaceNodeResults requires we maintain the same type for the return
7002     // value.
7003     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7004     break;
7005   }
7006   case ISD::BSWAP:
7007   case ISD::BITREVERSE: {
7008     MVT VT = N->getSimpleValueType(0);
7009     MVT XLenVT = Subtarget.getXLenVT();
7010     assert((VT == MVT::i8 || VT == MVT::i16 ||
7011             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7012            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7013     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7014     unsigned Imm = VT.getSizeInBits() - 1;
7015     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7016     if (N->getOpcode() == ISD::BSWAP)
7017       Imm &= ~0x7U;
7018     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7019                                 DAG.getConstant(Imm, DL, XLenVT));
7020     // ReplaceNodeResults requires we maintain the same type for the return
7021     // value.
7022     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7023     break;
7024   }
7025   case ISD::FSHL:
7026   case ISD::FSHR: {
7027     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7028            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7029     SDValue NewOp0 =
7030         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7031     SDValue NewOp1 =
7032         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7033     SDValue NewShAmt =
7034         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7035     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7036     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7037     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7038                            DAG.getConstant(0x1f, DL, MVT::i64));
7039     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
7040     // instruction use different orders. fshl will return its first operand for
7041     // shift of zero, fshr will return its second operand. fsl and fsr both
7042     // return rs1 so the ISD nodes need to have different operand orders.
7043     // Shift amount is in rs2.
7044     unsigned Opc = RISCVISD::FSLW;
7045     if (N->getOpcode() == ISD::FSHR) {
7046       std::swap(NewOp0, NewOp1);
7047       Opc = RISCVISD::FSRW;
7048     }
7049     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7050     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7051     break;
7052   }
7053   case ISD::EXTRACT_VECTOR_ELT: {
7054     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
7055     // type is illegal (currently only vXi64 RV32).
7056     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
7057     // transferred to the destination register. We issue two of these from the
7058     // upper- and lower- halves of the SEW-bit vector element, slid down to the
7059     // first element.
7060     SDValue Vec = N->getOperand(0);
7061     SDValue Idx = N->getOperand(1);
7062 
7063     // The vector type hasn't been legalized yet so we can't issue target
7064     // specific nodes if it needs legalization.
7065     // FIXME: We would manually legalize if it's important.
7066     if (!isTypeLegal(Vec.getValueType()))
7067       return;
7068 
7069     MVT VecVT = Vec.getSimpleValueType();
7070 
7071     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7072            VecVT.getVectorElementType() == MVT::i64 &&
7073            "Unexpected EXTRACT_VECTOR_ELT legalization");
7074 
7075     // If this is a fixed vector, we need to convert it to a scalable vector.
7076     MVT ContainerVT = VecVT;
7077     if (VecVT.isFixedLengthVector()) {
7078       ContainerVT = getContainerForFixedLengthVector(VecVT);
7079       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7080     }
7081 
7082     MVT XLenVT = Subtarget.getXLenVT();
7083 
7084     // Use a VL of 1 to avoid processing more elements than we need.
7085     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7086     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
7087 
7088     // Unless the index is known to be 0, we must slide the vector down to get
7089     // the desired element into index 0.
7090     if (!isNullConstant(Idx)) {
7091       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7092                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7093     }
7094 
7095     // Extract the lower XLEN bits of the correct vector element.
7096     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7097 
7098     // To extract the upper XLEN bits of the vector element, shift the first
7099     // element right by 32 bits and re-extract the lower XLEN bits.
7100     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7101                                      DAG.getUNDEF(ContainerVT),
7102                                      DAG.getConstant(32, DL, XLenVT), VL);
7103     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7104                                  ThirtyTwoV, Mask, VL);
7105 
7106     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7107 
7108     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7109     break;
7110   }
7111   case ISD::INTRINSIC_WO_CHAIN: {
7112     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7113     switch (IntNo) {
7114     default:
7115       llvm_unreachable(
7116           "Don't know how to custom type legalize this intrinsic!");
7117     case Intrinsic::riscv_grev:
7118     case Intrinsic::riscv_gorc: {
7119       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7120              "Unexpected custom legalisation");
7121       SDValue NewOp1 =
7122           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7123       SDValue NewOp2 =
7124           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7125       unsigned Opc =
7126           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
7127       // If the control is a constant, promote the node by clearing any extra
7128       // bits bits in the control. isel will form greviw/gorciw if the result is
7129       // sign extended.
7130       if (isa<ConstantSDNode>(NewOp2)) {
7131         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7132                              DAG.getConstant(0x1f, DL, MVT::i64));
7133         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7134       }
7135       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7136       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7137       break;
7138     }
7139     case Intrinsic::riscv_bcompress:
7140     case Intrinsic::riscv_bdecompress:
7141     case Intrinsic::riscv_bfp:
7142     case Intrinsic::riscv_fsl:
7143     case Intrinsic::riscv_fsr: {
7144       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7145              "Unexpected custom legalisation");
7146       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7147       break;
7148     }
7149     case Intrinsic::riscv_orc_b: {
7150       // Lower to the GORCI encoding for orc.b with the operand extended.
7151       SDValue NewOp =
7152           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7153       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7154                                 DAG.getConstant(7, DL, MVT::i64));
7155       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7156       return;
7157     }
7158     case Intrinsic::riscv_shfl:
7159     case Intrinsic::riscv_unshfl: {
7160       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7161              "Unexpected custom legalisation");
7162       SDValue NewOp1 =
7163           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7164       SDValue NewOp2 =
7165           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7166       unsigned Opc =
7167           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7168       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7169       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7170       // will be shuffled the same way as the lower 32 bit half, but the two
7171       // halves won't cross.
7172       if (isa<ConstantSDNode>(NewOp2)) {
7173         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7174                              DAG.getConstant(0xf, DL, MVT::i64));
7175         Opc =
7176             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7177       }
7178       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7179       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7180       break;
7181     }
7182     case Intrinsic::riscv_vmv_x_s: {
7183       EVT VT = N->getValueType(0);
7184       MVT XLenVT = Subtarget.getXLenVT();
7185       if (VT.bitsLT(XLenVT)) {
7186         // Simple case just extract using vmv.x.s and truncate.
7187         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7188                                       Subtarget.getXLenVT(), N->getOperand(1));
7189         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7190         return;
7191       }
7192 
7193       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7194              "Unexpected custom legalization");
7195 
7196       // We need to do the move in two steps.
7197       SDValue Vec = N->getOperand(1);
7198       MVT VecVT = Vec.getSimpleValueType();
7199 
7200       // First extract the lower XLEN bits of the element.
7201       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7202 
7203       // To extract the upper XLEN bits of the vector element, shift the first
7204       // element right by 32 bits and re-extract the lower XLEN bits.
7205       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7206       SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
7207 
7208       SDValue ThirtyTwoV =
7209           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7210                       DAG.getConstant(32, DL, XLenVT), VL);
7211       SDValue LShr32 =
7212           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7213       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7214 
7215       Results.push_back(
7216           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7217       break;
7218     }
7219     }
7220     break;
7221   }
7222   case ISD::VECREDUCE_ADD:
7223   case ISD::VECREDUCE_AND:
7224   case ISD::VECREDUCE_OR:
7225   case ISD::VECREDUCE_XOR:
7226   case ISD::VECREDUCE_SMAX:
7227   case ISD::VECREDUCE_UMAX:
7228   case ISD::VECREDUCE_SMIN:
7229   case ISD::VECREDUCE_UMIN:
7230     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7231       Results.push_back(V);
7232     break;
7233   case ISD::VP_REDUCE_ADD:
7234   case ISD::VP_REDUCE_AND:
7235   case ISD::VP_REDUCE_OR:
7236   case ISD::VP_REDUCE_XOR:
7237   case ISD::VP_REDUCE_SMAX:
7238   case ISD::VP_REDUCE_UMAX:
7239   case ISD::VP_REDUCE_SMIN:
7240   case ISD::VP_REDUCE_UMIN:
7241     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7242       Results.push_back(V);
7243     break;
7244   case ISD::FLT_ROUNDS_: {
7245     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7246     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7247     Results.push_back(Res.getValue(0));
7248     Results.push_back(Res.getValue(1));
7249     break;
7250   }
7251   }
7252 }
7253 
7254 // A structure to hold one of the bit-manipulation patterns below. Together, a
7255 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7256 //   (or (and (shl x, 1), 0xAAAAAAAA),
7257 //       (and (srl x, 1), 0x55555555))
7258 struct RISCVBitmanipPat {
7259   SDValue Op;
7260   unsigned ShAmt;
7261   bool IsSHL;
7262 
7263   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7264     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7265   }
7266 };
7267 
7268 // Matches patterns of the form
7269 //   (and (shl x, C2), (C1 << C2))
7270 //   (and (srl x, C2), C1)
7271 //   (shl (and x, C1), C2)
7272 //   (srl (and x, (C1 << C2)), C2)
7273 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7274 // The expected masks for each shift amount are specified in BitmanipMasks where
7275 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7276 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7277 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7278 // XLen is 64.
7279 static Optional<RISCVBitmanipPat>
7280 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7281   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7282          "Unexpected number of masks");
7283   Optional<uint64_t> Mask;
7284   // Optionally consume a mask around the shift operation.
7285   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7286     Mask = Op.getConstantOperandVal(1);
7287     Op = Op.getOperand(0);
7288   }
7289   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7290     return None;
7291   bool IsSHL = Op.getOpcode() == ISD::SHL;
7292 
7293   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7294     return None;
7295   uint64_t ShAmt = Op.getConstantOperandVal(1);
7296 
7297   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7298   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7299     return None;
7300   // If we don't have enough masks for 64 bit, then we must be trying to
7301   // match SHFL so we're only allowed to shift 1/4 of the width.
7302   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7303     return None;
7304 
7305   SDValue Src = Op.getOperand(0);
7306 
7307   // The expected mask is shifted left when the AND is found around SHL
7308   // patterns.
7309   //   ((x >> 1) & 0x55555555)
7310   //   ((x << 1) & 0xAAAAAAAA)
7311   bool SHLExpMask = IsSHL;
7312 
7313   if (!Mask) {
7314     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7315     // the mask is all ones: consume that now.
7316     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7317       Mask = Src.getConstantOperandVal(1);
7318       Src = Src.getOperand(0);
7319       // The expected mask is now in fact shifted left for SRL, so reverse the
7320       // decision.
7321       //   ((x & 0xAAAAAAAA) >> 1)
7322       //   ((x & 0x55555555) << 1)
7323       SHLExpMask = !SHLExpMask;
7324     } else {
7325       // Use a default shifted mask of all-ones if there's no AND, truncated
7326       // down to the expected width. This simplifies the logic later on.
7327       Mask = maskTrailingOnes<uint64_t>(Width);
7328       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7329     }
7330   }
7331 
7332   unsigned MaskIdx = Log2_32(ShAmt);
7333   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7334 
7335   if (SHLExpMask)
7336     ExpMask <<= ShAmt;
7337 
7338   if (Mask != ExpMask)
7339     return None;
7340 
7341   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7342 }
7343 
7344 // Matches any of the following bit-manipulation patterns:
7345 //   (and (shl x, 1), (0x55555555 << 1))
7346 //   (and (srl x, 1), 0x55555555)
7347 //   (shl (and x, 0x55555555), 1)
7348 //   (srl (and x, (0x55555555 << 1)), 1)
7349 // where the shift amount and mask may vary thus:
7350 //   [1]  = 0x55555555 / 0xAAAAAAAA
7351 //   [2]  = 0x33333333 / 0xCCCCCCCC
7352 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7353 //   [8]  = 0x00FF00FF / 0xFF00FF00
7354 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7355 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7356 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7357   // These are the unshifted masks which we use to match bit-manipulation
7358   // patterns. They may be shifted left in certain circumstances.
7359   static const uint64_t BitmanipMasks[] = {
7360       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7361       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7362 
7363   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7364 }
7365 
7366 // Try to fold (<bop> x, (reduction.<bop> vec, start))
7367 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
7368   auto BinOpToRVVReduce = [](unsigned Opc) {
7369     switch (Opc) {
7370     default:
7371       llvm_unreachable("Unhandled binary to transfrom reduction");
7372     case ISD::ADD:
7373       return RISCVISD::VECREDUCE_ADD_VL;
7374     case ISD::UMAX:
7375       return RISCVISD::VECREDUCE_UMAX_VL;
7376     case ISD::SMAX:
7377       return RISCVISD::VECREDUCE_SMAX_VL;
7378     case ISD::UMIN:
7379       return RISCVISD::VECREDUCE_UMIN_VL;
7380     case ISD::SMIN:
7381       return RISCVISD::VECREDUCE_SMIN_VL;
7382     case ISD::AND:
7383       return RISCVISD::VECREDUCE_AND_VL;
7384     case ISD::OR:
7385       return RISCVISD::VECREDUCE_OR_VL;
7386     case ISD::XOR:
7387       return RISCVISD::VECREDUCE_XOR_VL;
7388     case ISD::FADD:
7389       return RISCVISD::VECREDUCE_FADD_VL;
7390     case ISD::FMAXNUM:
7391       return RISCVISD::VECREDUCE_FMAX_VL;
7392     case ISD::FMINNUM:
7393       return RISCVISD::VECREDUCE_FMIN_VL;
7394     }
7395   };
7396 
7397   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
7398     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7399            isNullConstant(V.getOperand(1)) &&
7400            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
7401   };
7402 
7403   unsigned Opc = N->getOpcode();
7404   unsigned ReduceIdx;
7405   if (IsReduction(N->getOperand(0), Opc))
7406     ReduceIdx = 0;
7407   else if (IsReduction(N->getOperand(1), Opc))
7408     ReduceIdx = 1;
7409   else
7410     return SDValue();
7411 
7412   // Skip if FADD disallows reassociation but the combiner needs.
7413   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
7414     return SDValue();
7415 
7416   SDValue Extract = N->getOperand(ReduceIdx);
7417   SDValue Reduce = Extract.getOperand(0);
7418   if (!Reduce.hasOneUse())
7419     return SDValue();
7420 
7421   SDValue ScalarV = Reduce.getOperand(2);
7422 
7423   // Make sure that ScalarV is a splat with VL=1.
7424   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
7425       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
7426       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
7427     return SDValue();
7428 
7429   if (!isOneConstant(ScalarV.getOperand(2)))
7430     return SDValue();
7431 
7432   // TODO: Deal with value other than neutral element.
7433   auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
7434     if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
7435         isNullFPConstant(V))
7436       return true;
7437     return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
7438                                  N->getFlags()) == V;
7439   };
7440 
7441   // Check the scalar of ScalarV is neutral element
7442   if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
7443     return SDValue();
7444 
7445   if (!ScalarV.hasOneUse())
7446     return SDValue();
7447 
7448   EVT SplatVT = ScalarV.getValueType();
7449   SDValue NewStart = N->getOperand(1 - ReduceIdx);
7450   unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
7451   if (SplatVT.isInteger()) {
7452     auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
7453     if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
7454       SplatOpc = RISCVISD::VMV_S_X_VL;
7455     else
7456       SplatOpc = RISCVISD::VMV_V_X_VL;
7457   }
7458 
7459   SDValue NewScalarV =
7460       DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
7461                   ScalarV.getOperand(2));
7462   SDValue NewReduce =
7463       DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
7464                   Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
7465                   Reduce.getOperand(3), Reduce.getOperand(4));
7466   return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
7467                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
7468 }
7469 
7470 // Match the following pattern as a GREVI(W) operation
7471 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7472 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7473                                const RISCVSubtarget &Subtarget) {
7474   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7475   EVT VT = Op.getValueType();
7476 
7477   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7478     auto LHS = matchGREVIPat(Op.getOperand(0));
7479     auto RHS = matchGREVIPat(Op.getOperand(1));
7480     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7481       SDLoc DL(Op);
7482       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7483                          DAG.getConstant(LHS->ShAmt, DL, VT));
7484     }
7485   }
7486   return SDValue();
7487 }
7488 
7489 // Matches any the following pattern as a GORCI(W) operation
7490 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7491 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7492 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7493 // Note that with the variant of 3.,
7494 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7495 // the inner pattern will first be matched as GREVI and then the outer
7496 // pattern will be matched to GORC via the first rule above.
7497 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7498 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7499                                const RISCVSubtarget &Subtarget) {
7500   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7501   EVT VT = Op.getValueType();
7502 
7503   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7504     SDLoc DL(Op);
7505     SDValue Op0 = Op.getOperand(0);
7506     SDValue Op1 = Op.getOperand(1);
7507 
7508     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7509       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7510           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7511           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7512         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7513       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7514       if ((Reverse.getOpcode() == ISD::ROTL ||
7515            Reverse.getOpcode() == ISD::ROTR) &&
7516           Reverse.getOperand(0) == X &&
7517           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7518         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7519         if (RotAmt == (VT.getSizeInBits() / 2))
7520           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7521                              DAG.getConstant(RotAmt, DL, VT));
7522       }
7523       return SDValue();
7524     };
7525 
7526     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7527     if (SDValue V = MatchOROfReverse(Op0, Op1))
7528       return V;
7529     if (SDValue V = MatchOROfReverse(Op1, Op0))
7530       return V;
7531 
7532     // OR is commutable so canonicalize its OR operand to the left
7533     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7534       std::swap(Op0, Op1);
7535     if (Op0.getOpcode() != ISD::OR)
7536       return SDValue();
7537     SDValue OrOp0 = Op0.getOperand(0);
7538     SDValue OrOp1 = Op0.getOperand(1);
7539     auto LHS = matchGREVIPat(OrOp0);
7540     // OR is commutable so swap the operands and try again: x might have been
7541     // on the left
7542     if (!LHS) {
7543       std::swap(OrOp0, OrOp1);
7544       LHS = matchGREVIPat(OrOp0);
7545     }
7546     auto RHS = matchGREVIPat(Op1);
7547     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7548       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7549                          DAG.getConstant(LHS->ShAmt, DL, VT));
7550     }
7551   }
7552   return SDValue();
7553 }
7554 
7555 // Matches any of the following bit-manipulation patterns:
7556 //   (and (shl x, 1), (0x22222222 << 1))
7557 //   (and (srl x, 1), 0x22222222)
7558 //   (shl (and x, 0x22222222), 1)
7559 //   (srl (and x, (0x22222222 << 1)), 1)
7560 // where the shift amount and mask may vary thus:
7561 //   [1]  = 0x22222222 / 0x44444444
7562 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7563 //   [4]  = 0x00F000F0 / 0x0F000F00
7564 //   [8]  = 0x0000FF00 / 0x00FF0000
7565 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7566 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7567   // These are the unshifted masks which we use to match bit-manipulation
7568   // patterns. They may be shifted left in certain circumstances.
7569   static const uint64_t BitmanipMasks[] = {
7570       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7571       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7572 
7573   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7574 }
7575 
7576 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7577 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7578                                const RISCVSubtarget &Subtarget) {
7579   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7580   EVT VT = Op.getValueType();
7581 
7582   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7583     return SDValue();
7584 
7585   SDValue Op0 = Op.getOperand(0);
7586   SDValue Op1 = Op.getOperand(1);
7587 
7588   // Or is commutable so canonicalize the second OR to the LHS.
7589   if (Op0.getOpcode() != ISD::OR)
7590     std::swap(Op0, Op1);
7591   if (Op0.getOpcode() != ISD::OR)
7592     return SDValue();
7593 
7594   // We found an inner OR, so our operands are the operands of the inner OR
7595   // and the other operand of the outer OR.
7596   SDValue A = Op0.getOperand(0);
7597   SDValue B = Op0.getOperand(1);
7598   SDValue C = Op1;
7599 
7600   auto Match1 = matchSHFLPat(A);
7601   auto Match2 = matchSHFLPat(B);
7602 
7603   // If neither matched, we failed.
7604   if (!Match1 && !Match2)
7605     return SDValue();
7606 
7607   // We had at least one match. if one failed, try the remaining C operand.
7608   if (!Match1) {
7609     std::swap(A, C);
7610     Match1 = matchSHFLPat(A);
7611     if (!Match1)
7612       return SDValue();
7613   } else if (!Match2) {
7614     std::swap(B, C);
7615     Match2 = matchSHFLPat(B);
7616     if (!Match2)
7617       return SDValue();
7618   }
7619   assert(Match1 && Match2);
7620 
7621   // Make sure our matches pair up.
7622   if (!Match1->formsPairWith(*Match2))
7623     return SDValue();
7624 
7625   // All the remains is to make sure C is an AND with the same input, that masks
7626   // out the bits that are being shuffled.
7627   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7628       C.getOperand(0) != Match1->Op)
7629     return SDValue();
7630 
7631   uint64_t Mask = C.getConstantOperandVal(1);
7632 
7633   static const uint64_t BitmanipMasks[] = {
7634       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7635       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7636   };
7637 
7638   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7639   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7640   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7641 
7642   if (Mask != ExpMask)
7643     return SDValue();
7644 
7645   SDLoc DL(Op);
7646   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7647                      DAG.getConstant(Match1->ShAmt, DL, VT));
7648 }
7649 
7650 // Optimize (add (shl x, c0), (shl y, c1)) ->
7651 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7652 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7653                                   const RISCVSubtarget &Subtarget) {
7654   // Perform this optimization only in the zba extension.
7655   if (!Subtarget.hasStdExtZba())
7656     return SDValue();
7657 
7658   // Skip for vector types and larger types.
7659   EVT VT = N->getValueType(0);
7660   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7661     return SDValue();
7662 
7663   // The two operand nodes must be SHL and have no other use.
7664   SDValue N0 = N->getOperand(0);
7665   SDValue N1 = N->getOperand(1);
7666   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7667       !N0->hasOneUse() || !N1->hasOneUse())
7668     return SDValue();
7669 
7670   // Check c0 and c1.
7671   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7672   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7673   if (!N0C || !N1C)
7674     return SDValue();
7675   int64_t C0 = N0C->getSExtValue();
7676   int64_t C1 = N1C->getSExtValue();
7677   if (C0 <= 0 || C1 <= 0)
7678     return SDValue();
7679 
7680   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7681   int64_t Bits = std::min(C0, C1);
7682   int64_t Diff = std::abs(C0 - C1);
7683   if (Diff != 1 && Diff != 2 && Diff != 3)
7684     return SDValue();
7685 
7686   // Build nodes.
7687   SDLoc DL(N);
7688   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7689   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7690   SDValue NA0 =
7691       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7692   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7693   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7694 }
7695 
7696 // Combine
7697 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7698 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7699 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7700 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7701 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7702 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7703 // The grev patterns represents BSWAP.
7704 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7705 // off the grev.
7706 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7707                                           const RISCVSubtarget &Subtarget) {
7708   bool IsWInstruction =
7709       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7710   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7711           IsWInstruction) &&
7712          "Unexpected opcode!");
7713   SDValue Src = N->getOperand(0);
7714   EVT VT = N->getValueType(0);
7715   SDLoc DL(N);
7716 
7717   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7718     return SDValue();
7719 
7720   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7721       !isa<ConstantSDNode>(Src.getOperand(1)))
7722     return SDValue();
7723 
7724   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7725   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7726 
7727   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7728   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7729   unsigned ShAmt1 = N->getConstantOperandVal(1);
7730   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7731   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7732     return SDValue();
7733 
7734   Src = Src.getOperand(0);
7735 
7736   // Toggle bit the MSB of the shift.
7737   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7738   if (CombinedShAmt == 0)
7739     return Src;
7740 
7741   SDValue Res = DAG.getNode(
7742       RISCVISD::GREV, DL, VT, Src,
7743       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7744   if (!IsWInstruction)
7745     return Res;
7746 
7747   // Sign extend the result to match the behavior of the rotate. This will be
7748   // selected to GREVIW in isel.
7749   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7750                      DAG.getValueType(MVT::i32));
7751 }
7752 
7753 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7754 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7755 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7756 // not undo itself, but they are redundant.
7757 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7758   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7759   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7760   SDValue Src = N->getOperand(0);
7761 
7762   if (Src.getOpcode() != N->getOpcode())
7763     return SDValue();
7764 
7765   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7766       !isa<ConstantSDNode>(Src.getOperand(1)))
7767     return SDValue();
7768 
7769   unsigned ShAmt1 = N->getConstantOperandVal(1);
7770   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7771   Src = Src.getOperand(0);
7772 
7773   unsigned CombinedShAmt;
7774   if (IsGORC)
7775     CombinedShAmt = ShAmt1 | ShAmt2;
7776   else
7777     CombinedShAmt = ShAmt1 ^ ShAmt2;
7778 
7779   if (CombinedShAmt == 0)
7780     return Src;
7781 
7782   SDLoc DL(N);
7783   return DAG.getNode(
7784       N->getOpcode(), DL, N->getValueType(0), Src,
7785       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7786 }
7787 
7788 // Combine a constant select operand into its use:
7789 //
7790 // (and (select cond, -1, c), x)
7791 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7792 // (or  (select cond, 0, c), x)
7793 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7794 // (xor (select cond, 0, c), x)
7795 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7796 // (add (select cond, 0, c), x)
7797 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7798 // (sub x, (select cond, 0, c))
7799 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7800 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7801                                    SelectionDAG &DAG, bool AllOnes) {
7802   EVT VT = N->getValueType(0);
7803 
7804   // Skip vectors.
7805   if (VT.isVector())
7806     return SDValue();
7807 
7808   if ((Slct.getOpcode() != ISD::SELECT &&
7809        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7810       !Slct.hasOneUse())
7811     return SDValue();
7812 
7813   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7814     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7815   };
7816 
7817   bool SwapSelectOps;
7818   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7819   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7820   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7821   SDValue NonConstantVal;
7822   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7823     SwapSelectOps = false;
7824     NonConstantVal = FalseVal;
7825   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7826     SwapSelectOps = true;
7827     NonConstantVal = TrueVal;
7828   } else
7829     return SDValue();
7830 
7831   // Slct is now know to be the desired identity constant when CC is true.
7832   TrueVal = OtherOp;
7833   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7834   // Unless SwapSelectOps says the condition should be false.
7835   if (SwapSelectOps)
7836     std::swap(TrueVal, FalseVal);
7837 
7838   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7839     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7840                        {Slct.getOperand(0), Slct.getOperand(1),
7841                         Slct.getOperand(2), TrueVal, FalseVal});
7842 
7843   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7844                      {Slct.getOperand(0), TrueVal, FalseVal});
7845 }
7846 
7847 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7848 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7849                                               bool AllOnes) {
7850   SDValue N0 = N->getOperand(0);
7851   SDValue N1 = N->getOperand(1);
7852   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7853     return Result;
7854   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7855     return Result;
7856   return SDValue();
7857 }
7858 
7859 // Transform (add (mul x, c0), c1) ->
7860 //           (add (mul (add x, c1/c0), c0), c1%c0).
7861 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7862 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7863 // to an infinite loop in DAGCombine if transformed.
7864 // Or transform (add (mul x, c0), c1) ->
7865 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7866 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7867 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7868 // lead to an infinite loop in DAGCombine if transformed.
7869 // Or transform (add (mul x, c0), c1) ->
7870 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7871 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7872 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7873 // lead to an infinite loop in DAGCombine if transformed.
7874 // Or transform (add (mul x, c0), c1) ->
7875 //              (mul (add x, c1/c0), c0).
7876 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7877 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7878                                      const RISCVSubtarget &Subtarget) {
7879   // Skip for vector types and larger types.
7880   EVT VT = N->getValueType(0);
7881   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7882     return SDValue();
7883   // The first operand node must be a MUL and has no other use.
7884   SDValue N0 = N->getOperand(0);
7885   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7886     return SDValue();
7887   // Check if c0 and c1 match above conditions.
7888   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7889   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7890   if (!N0C || !N1C)
7891     return SDValue();
7892   // If N0C has multiple uses it's possible one of the cases in
7893   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7894   // in an infinite loop.
7895   if (!N0C->hasOneUse())
7896     return SDValue();
7897   int64_t C0 = N0C->getSExtValue();
7898   int64_t C1 = N1C->getSExtValue();
7899   int64_t CA, CB;
7900   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7901     return SDValue();
7902   // Search for proper CA (non-zero) and CB that both are simm12.
7903   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7904       !isInt<12>(C0 * (C1 / C0))) {
7905     CA = C1 / C0;
7906     CB = C1 % C0;
7907   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7908              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7909     CA = C1 / C0 + 1;
7910     CB = C1 % C0 - C0;
7911   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7912              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7913     CA = C1 / C0 - 1;
7914     CB = C1 % C0 + C0;
7915   } else
7916     return SDValue();
7917   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7918   SDLoc DL(N);
7919   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7920                              DAG.getConstant(CA, DL, VT));
7921   SDValue New1 =
7922       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7923   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7924 }
7925 
7926 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7927                                  const RISCVSubtarget &Subtarget) {
7928   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7929     return V;
7930   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7931     return V;
7932   if (SDValue V = combineBinOpToReduce(N, DAG))
7933     return V;
7934   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7935   //      (select lhs, rhs, cc, x, (add x, y))
7936   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7937 }
7938 
7939 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7940   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7941   //      (select lhs, rhs, cc, x, (sub x, y))
7942   SDValue N0 = N->getOperand(0);
7943   SDValue N1 = N->getOperand(1);
7944   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7945 }
7946 
7947 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7948   if (SDValue V = combineBinOpToReduce(N, DAG))
7949     return V;
7950   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7951   //      (select lhs, rhs, cc, x, (and x, y))
7952   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7953 }
7954 
7955 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7956                                 const RISCVSubtarget &Subtarget) {
7957   if (Subtarget.hasStdExtZbp()) {
7958     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7959       return GREV;
7960     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7961       return GORC;
7962     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7963       return SHFL;
7964   }
7965 
7966   if (SDValue V = combineBinOpToReduce(N, DAG))
7967     return V;
7968   // fold (or (select cond, 0, y), x) ->
7969   //      (select cond, x, (or x, y))
7970   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7971 }
7972 
7973 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7974   SDValue N0 = N->getOperand(0);
7975   SDValue N1 = N->getOperand(1);
7976 
7977   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
7978   // NOTE: Assumes ROL being legal means ROLW is legal.
7979   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7980   if (N0.getOpcode() == RISCVISD::SLLW &&
7981       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
7982       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
7983     SDLoc DL(N);
7984     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
7985                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
7986   }
7987 
7988   if (SDValue V = combineBinOpToReduce(N, DAG))
7989     return V;
7990   // fold (xor (select cond, 0, y), x) ->
7991   //      (select cond, x, (xor x, y))
7992   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7993 }
7994 
7995 static SDValue
7996 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
7997                                 const RISCVSubtarget &Subtarget) {
7998   SDValue Src = N->getOperand(0);
7999   EVT VT = N->getValueType(0);
8000 
8001   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8002   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8003       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8004     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8005                        Src.getOperand(0));
8006 
8007   // Fold (i64 (sext_inreg (abs X), i32)) ->
8008   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8009   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8010   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8011   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8012   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8013   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8014   // may get combined into an earlier operation so we need to use
8015   // ComputeNumSignBits.
8016   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8017   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8018   // we can't assume that X has 33 sign bits. We must check.
8019   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8020       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8021       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8022       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8023     SDLoc DL(N);
8024     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8025     SDValue Neg =
8026         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8027     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8028                       DAG.getValueType(MVT::i32));
8029     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8030   }
8031 
8032   return SDValue();
8033 }
8034 
8035 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8036 // vwadd(u).vv/vx or vwsub(u).vv/vx.
8037 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8038                                              bool Commute = false) {
8039   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8040           N->getOpcode() == RISCVISD::SUB_VL) &&
8041          "Unexpected opcode");
8042   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8043   SDValue Op0 = N->getOperand(0);
8044   SDValue Op1 = N->getOperand(1);
8045   if (Commute)
8046     std::swap(Op0, Op1);
8047 
8048   MVT VT = N->getSimpleValueType(0);
8049 
8050   // Determine the narrow size for a widening add/sub.
8051   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8052   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8053                                   VT.getVectorElementCount());
8054 
8055   SDValue Mask = N->getOperand(2);
8056   SDValue VL = N->getOperand(3);
8057 
8058   SDLoc DL(N);
8059 
8060   // If the RHS is a sext or zext, we can form a widening op.
8061   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8062        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8063       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8064     unsigned ExtOpc = Op1.getOpcode();
8065     Op1 = Op1.getOperand(0);
8066     // Re-introduce narrower extends if needed.
8067     if (Op1.getValueType() != NarrowVT)
8068       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8069 
8070     unsigned WOpc;
8071     if (ExtOpc == RISCVISD::VSEXT_VL)
8072       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8073     else
8074       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8075 
8076     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8077   }
8078 
8079   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8080   // sext/zext?
8081 
8082   return SDValue();
8083 }
8084 
8085 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8086 // vwsub(u).vv/vx.
8087 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8088   SDValue Op0 = N->getOperand(0);
8089   SDValue Op1 = N->getOperand(1);
8090   SDValue Mask = N->getOperand(2);
8091   SDValue VL = N->getOperand(3);
8092 
8093   MVT VT = N->getSimpleValueType(0);
8094   MVT NarrowVT = Op1.getSimpleValueType();
8095   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8096 
8097   unsigned VOpc;
8098   switch (N->getOpcode()) {
8099   default: llvm_unreachable("Unexpected opcode");
8100   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8101   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8102   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8103   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8104   }
8105 
8106   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8107                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8108 
8109   SDLoc DL(N);
8110 
8111   // If the LHS is a sext or zext, we can narrow this op to the same size as
8112   // the RHS.
8113   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8114        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8115       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8116     unsigned ExtOpc = Op0.getOpcode();
8117     Op0 = Op0.getOperand(0);
8118     // Re-introduce narrower extends if needed.
8119     if (Op0.getValueType() != NarrowVT)
8120       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8121     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8122   }
8123 
8124   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8125                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8126 
8127   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8128   // to commute and use a vwadd(u).vx instead.
8129   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8130       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8131     Op0 = Op0.getOperand(1);
8132 
8133     // See if have enough sign bits or zero bits in the scalar to use a
8134     // widening add/sub by splatting to smaller element size.
8135     unsigned EltBits = VT.getScalarSizeInBits();
8136     unsigned ScalarBits = Op0.getValueSizeInBits();
8137     // Make sure we're getting all element bits from the scalar register.
8138     // FIXME: Support implicit sign extension of vmv.v.x?
8139     if (ScalarBits < EltBits)
8140       return SDValue();
8141 
8142     if (IsSigned) {
8143       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8144         return SDValue();
8145     } else {
8146       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8147       if (!DAG.MaskedValueIsZero(Op0, Mask))
8148         return SDValue();
8149     }
8150 
8151     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8152                       DAG.getUNDEF(NarrowVT), Op0, VL);
8153     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8154   }
8155 
8156   return SDValue();
8157 }
8158 
8159 // Try to form VWMUL, VWMULU or VWMULSU.
8160 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
8161 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8162                                        bool Commute) {
8163   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8164   SDValue Op0 = N->getOperand(0);
8165   SDValue Op1 = N->getOperand(1);
8166   if (Commute)
8167     std::swap(Op0, Op1);
8168 
8169   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8170   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8171   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8172   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8173     return SDValue();
8174 
8175   SDValue Mask = N->getOperand(2);
8176   SDValue VL = N->getOperand(3);
8177 
8178   // Make sure the mask and VL match.
8179   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8180     return SDValue();
8181 
8182   MVT VT = N->getSimpleValueType(0);
8183 
8184   // Determine the narrow size for a widening multiply.
8185   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8186   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8187                                   VT.getVectorElementCount());
8188 
8189   SDLoc DL(N);
8190 
8191   // See if the other operand is the same opcode.
8192   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8193     if (!Op1.hasOneUse())
8194       return SDValue();
8195 
8196     // Make sure the mask and VL match.
8197     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8198       return SDValue();
8199 
8200     Op1 = Op1.getOperand(0);
8201   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8202     // The operand is a splat of a scalar.
8203 
8204     // The pasthru must be undef for tail agnostic
8205     if (!Op1.getOperand(0).isUndef())
8206       return SDValue();
8207     // The VL must be the same.
8208     if (Op1.getOperand(2) != VL)
8209       return SDValue();
8210 
8211     // Get the scalar value.
8212     Op1 = Op1.getOperand(1);
8213 
8214     // See if have enough sign bits or zero bits in the scalar to use a
8215     // widening multiply by splatting to smaller element size.
8216     unsigned EltBits = VT.getScalarSizeInBits();
8217     unsigned ScalarBits = Op1.getValueSizeInBits();
8218     // Make sure we're getting all element bits from the scalar register.
8219     // FIXME: Support implicit sign extension of vmv.v.x?
8220     if (ScalarBits < EltBits)
8221       return SDValue();
8222 
8223     // If the LHS is a sign extend, try to use vwmul.
8224     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8225       // Can use vwmul.
8226     } else {
8227       // Otherwise try to use vwmulu or vwmulsu.
8228       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8229       if (DAG.MaskedValueIsZero(Op1, Mask))
8230         IsVWMULSU = IsSignExt;
8231       else
8232         return SDValue();
8233     }
8234 
8235     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8236                       DAG.getUNDEF(NarrowVT), Op1, VL);
8237   } else
8238     return SDValue();
8239 
8240   Op0 = Op0.getOperand(0);
8241 
8242   // Re-introduce narrower extends if needed.
8243   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8244   if (Op0.getValueType() != NarrowVT)
8245     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8246   // vwmulsu requires second operand to be zero extended.
8247   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8248   if (Op1.getValueType() != NarrowVT)
8249     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8250 
8251   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8252   if (!IsVWMULSU)
8253     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8254   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8255 }
8256 
8257 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8258   switch (Op.getOpcode()) {
8259   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8260   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8261   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8262   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8263   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8264   }
8265 
8266   return RISCVFPRndMode::Invalid;
8267 }
8268 
8269 // Fold
8270 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8271 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8272 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8273 //   (fp_to_int (fceil X))      -> fcvt X, rup
8274 //   (fp_to_int (fround X))     -> fcvt X, rmm
8275 static SDValue performFP_TO_INTCombine(SDNode *N,
8276                                        TargetLowering::DAGCombinerInfo &DCI,
8277                                        const RISCVSubtarget &Subtarget) {
8278   SelectionDAG &DAG = DCI.DAG;
8279   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8280   MVT XLenVT = Subtarget.getXLenVT();
8281 
8282   // Only handle XLen or i32 types. Other types narrower than XLen will
8283   // eventually be legalized to XLenVT.
8284   EVT VT = N->getValueType(0);
8285   if (VT != MVT::i32 && VT != XLenVT)
8286     return SDValue();
8287 
8288   SDValue Src = N->getOperand(0);
8289 
8290   // Ensure the FP type is also legal.
8291   if (!TLI.isTypeLegal(Src.getValueType()))
8292     return SDValue();
8293 
8294   // Don't do this for f16 with Zfhmin and not Zfh.
8295   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8296     return SDValue();
8297 
8298   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8299   if (FRM == RISCVFPRndMode::Invalid)
8300     return SDValue();
8301 
8302   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8303 
8304   unsigned Opc;
8305   if (VT == XLenVT)
8306     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8307   else
8308     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8309 
8310   SDLoc DL(N);
8311   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8312                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8313   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8314 }
8315 
8316 // Fold
8317 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8318 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8319 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8320 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8321 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8322 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8323                                        TargetLowering::DAGCombinerInfo &DCI,
8324                                        const RISCVSubtarget &Subtarget) {
8325   SelectionDAG &DAG = DCI.DAG;
8326   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8327   MVT XLenVT = Subtarget.getXLenVT();
8328 
8329   // Only handle XLen types. Other types narrower than XLen will eventually be
8330   // legalized to XLenVT.
8331   EVT DstVT = N->getValueType(0);
8332   if (DstVT != XLenVT)
8333     return SDValue();
8334 
8335   SDValue Src = N->getOperand(0);
8336 
8337   // Ensure the FP type is also legal.
8338   if (!TLI.isTypeLegal(Src.getValueType()))
8339     return SDValue();
8340 
8341   // Don't do this for f16 with Zfhmin and not Zfh.
8342   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8343     return SDValue();
8344 
8345   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8346 
8347   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8348   if (FRM == RISCVFPRndMode::Invalid)
8349     return SDValue();
8350 
8351   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8352 
8353   unsigned Opc;
8354   if (SatVT == DstVT)
8355     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8356   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8357     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8358   else
8359     return SDValue();
8360   // FIXME: Support other SatVTs by clamping before or after the conversion.
8361 
8362   Src = Src.getOperand(0);
8363 
8364   SDLoc DL(N);
8365   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8366                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8367 
8368   // RISCV FP-to-int conversions saturate to the destination register size, but
8369   // don't produce 0 for nan.
8370   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8371   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8372 }
8373 
8374 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8375 // smaller than XLenVT.
8376 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8377                                         const RISCVSubtarget &Subtarget) {
8378   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8379 
8380   SDValue Src = N->getOperand(0);
8381   if (Src.getOpcode() != ISD::BSWAP)
8382     return SDValue();
8383 
8384   EVT VT = N->getValueType(0);
8385   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8386       !isPowerOf2_32(VT.getSizeInBits()))
8387     return SDValue();
8388 
8389   SDLoc DL(N);
8390   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8391                      DAG.getConstant(7, DL, VT));
8392 }
8393 
8394 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8395                                                DAGCombinerInfo &DCI) const {
8396   SelectionDAG &DAG = DCI.DAG;
8397 
8398   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8399   // bits are demanded. N will be added to the Worklist if it was not deleted.
8400   // Caller should return SDValue(N, 0) if this returns true.
8401   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8402     SDValue Op = N->getOperand(OpNo);
8403     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8404     if (!SimplifyDemandedBits(Op, Mask, DCI))
8405       return false;
8406 
8407     if (N->getOpcode() != ISD::DELETED_NODE)
8408       DCI.AddToWorklist(N);
8409     return true;
8410   };
8411 
8412   switch (N->getOpcode()) {
8413   default:
8414     break;
8415   case RISCVISD::SplitF64: {
8416     SDValue Op0 = N->getOperand(0);
8417     // If the input to SplitF64 is just BuildPairF64 then the operation is
8418     // redundant. Instead, use BuildPairF64's operands directly.
8419     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8420       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8421 
8422     if (Op0->isUndef()) {
8423       SDValue Lo = DAG.getUNDEF(MVT::i32);
8424       SDValue Hi = DAG.getUNDEF(MVT::i32);
8425       return DCI.CombineTo(N, Lo, Hi);
8426     }
8427 
8428     SDLoc DL(N);
8429 
8430     // It's cheaper to materialise two 32-bit integers than to load a double
8431     // from the constant pool and transfer it to integer registers through the
8432     // stack.
8433     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8434       APInt V = C->getValueAPF().bitcastToAPInt();
8435       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8436       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8437       return DCI.CombineTo(N, Lo, Hi);
8438     }
8439 
8440     // This is a target-specific version of a DAGCombine performed in
8441     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8442     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8443     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8444     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8445         !Op0.getNode()->hasOneUse())
8446       break;
8447     SDValue NewSplitF64 =
8448         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8449                     Op0.getOperand(0));
8450     SDValue Lo = NewSplitF64.getValue(0);
8451     SDValue Hi = NewSplitF64.getValue(1);
8452     APInt SignBit = APInt::getSignMask(32);
8453     if (Op0.getOpcode() == ISD::FNEG) {
8454       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8455                                   DAG.getConstant(SignBit, DL, MVT::i32));
8456       return DCI.CombineTo(N, Lo, NewHi);
8457     }
8458     assert(Op0.getOpcode() == ISD::FABS);
8459     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8460                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8461     return DCI.CombineTo(N, Lo, NewHi);
8462   }
8463   case RISCVISD::SLLW:
8464   case RISCVISD::SRAW:
8465   case RISCVISD::SRLW: {
8466     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8467     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8468         SimplifyDemandedLowBitsHelper(1, 5))
8469       return SDValue(N, 0);
8470 
8471     break;
8472   }
8473   case ISD::ROTR:
8474   case ISD::ROTL:
8475   case RISCVISD::RORW:
8476   case RISCVISD::ROLW: {
8477     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8478       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8479       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8480           SimplifyDemandedLowBitsHelper(1, 5))
8481         return SDValue(N, 0);
8482     }
8483 
8484     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8485   }
8486   case RISCVISD::CLZW:
8487   case RISCVISD::CTZW: {
8488     // Only the lower 32 bits of the first operand are read
8489     if (SimplifyDemandedLowBitsHelper(0, 32))
8490       return SDValue(N, 0);
8491     break;
8492   }
8493   case RISCVISD::GREV:
8494   case RISCVISD::GORC: {
8495     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8496     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8497     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8498     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8499       return SDValue(N, 0);
8500 
8501     return combineGREVI_GORCI(N, DAG);
8502   }
8503   case RISCVISD::GREVW:
8504   case RISCVISD::GORCW: {
8505     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8506     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8507         SimplifyDemandedLowBitsHelper(1, 5))
8508       return SDValue(N, 0);
8509 
8510     break;
8511   }
8512   case RISCVISD::SHFL:
8513   case RISCVISD::UNSHFL: {
8514     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8515     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8516     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8517     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8518       return SDValue(N, 0);
8519 
8520     break;
8521   }
8522   case RISCVISD::SHFLW:
8523   case RISCVISD::UNSHFLW: {
8524     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8525     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8526         SimplifyDemandedLowBitsHelper(1, 4))
8527       return SDValue(N, 0);
8528 
8529     break;
8530   }
8531   case RISCVISD::BCOMPRESSW:
8532   case RISCVISD::BDECOMPRESSW: {
8533     // Only the lower 32 bits of LHS and RHS are read.
8534     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8535         SimplifyDemandedLowBitsHelper(1, 32))
8536       return SDValue(N, 0);
8537 
8538     break;
8539   }
8540   case RISCVISD::FSR:
8541   case RISCVISD::FSL:
8542   case RISCVISD::FSRW:
8543   case RISCVISD::FSLW: {
8544     bool IsWInstruction =
8545         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8546     unsigned BitWidth =
8547         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8548     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8549     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8550     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8551       return SDValue(N, 0);
8552 
8553     break;
8554   }
8555   case RISCVISD::FMV_X_ANYEXTH:
8556   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8557     SDLoc DL(N);
8558     SDValue Op0 = N->getOperand(0);
8559     MVT VT = N->getSimpleValueType(0);
8560     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8561     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8562     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8563     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8564          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8565         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8566          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8567       assert(Op0.getOperand(0).getValueType() == VT &&
8568              "Unexpected value type!");
8569       return Op0.getOperand(0);
8570     }
8571 
8572     // This is a target-specific version of a DAGCombine performed in
8573     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8574     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8575     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8576     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8577         !Op0.getNode()->hasOneUse())
8578       break;
8579     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8580     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8581     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8582     if (Op0.getOpcode() == ISD::FNEG)
8583       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8584                          DAG.getConstant(SignBit, DL, VT));
8585 
8586     assert(Op0.getOpcode() == ISD::FABS);
8587     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8588                        DAG.getConstant(~SignBit, DL, VT));
8589   }
8590   case ISD::ADD:
8591     return performADDCombine(N, DAG, Subtarget);
8592   case ISD::SUB:
8593     return performSUBCombine(N, DAG);
8594   case ISD::AND:
8595     return performANDCombine(N, DAG);
8596   case ISD::OR:
8597     return performORCombine(N, DAG, Subtarget);
8598   case ISD::XOR:
8599     return performXORCombine(N, DAG);
8600   case ISD::FADD:
8601   case ISD::UMAX:
8602   case ISD::UMIN:
8603   case ISD::SMAX:
8604   case ISD::SMIN:
8605   case ISD::FMAXNUM:
8606   case ISD::FMINNUM:
8607     return combineBinOpToReduce(N, DAG);
8608   case ISD::SIGN_EXTEND_INREG:
8609     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8610   case ISD::ZERO_EXTEND:
8611     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8612     // type legalization. This is safe because fp_to_uint produces poison if
8613     // it overflows.
8614     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8615       SDValue Src = N->getOperand(0);
8616       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8617           isTypeLegal(Src.getOperand(0).getValueType()))
8618         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8619                            Src.getOperand(0));
8620       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8621           isTypeLegal(Src.getOperand(1).getValueType())) {
8622         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8623         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8624                                   Src.getOperand(0), Src.getOperand(1));
8625         DCI.CombineTo(N, Res);
8626         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8627         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8628         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8629       }
8630     }
8631     return SDValue();
8632   case RISCVISD::SELECT_CC: {
8633     // Transform
8634     SDValue LHS = N->getOperand(0);
8635     SDValue RHS = N->getOperand(1);
8636     SDValue TrueV = N->getOperand(3);
8637     SDValue FalseV = N->getOperand(4);
8638 
8639     // If the True and False values are the same, we don't need a select_cc.
8640     if (TrueV == FalseV)
8641       return TrueV;
8642 
8643     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8644     if (!ISD::isIntEqualitySetCC(CCVal))
8645       break;
8646 
8647     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8648     //      (select_cc X, Y, lt, trueV, falseV)
8649     // Sometimes the setcc is introduced after select_cc has been formed.
8650     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8651         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8652       // If we're looking for eq 0 instead of ne 0, we need to invert the
8653       // condition.
8654       bool Invert = CCVal == ISD::SETEQ;
8655       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8656       if (Invert)
8657         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8658 
8659       SDLoc DL(N);
8660       RHS = LHS.getOperand(1);
8661       LHS = LHS.getOperand(0);
8662       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8663 
8664       SDValue TargetCC = DAG.getCondCode(CCVal);
8665       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8666                          {LHS, RHS, TargetCC, TrueV, FalseV});
8667     }
8668 
8669     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8670     //      (select_cc X, Y, eq/ne, trueV, falseV)
8671     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8672       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8673                          {LHS.getOperand(0), LHS.getOperand(1),
8674                           N->getOperand(2), TrueV, FalseV});
8675     // (select_cc X, 1, setne, trueV, falseV) ->
8676     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8677     // This can occur when legalizing some floating point comparisons.
8678     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8679     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8680       SDLoc DL(N);
8681       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8682       SDValue TargetCC = DAG.getCondCode(CCVal);
8683       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8684       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8685                          {LHS, RHS, TargetCC, TrueV, FalseV});
8686     }
8687 
8688     break;
8689   }
8690   case RISCVISD::BR_CC: {
8691     SDValue LHS = N->getOperand(1);
8692     SDValue RHS = N->getOperand(2);
8693     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8694     if (!ISD::isIntEqualitySetCC(CCVal))
8695       break;
8696 
8697     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8698     //      (br_cc X, Y, lt, dest)
8699     // Sometimes the setcc is introduced after br_cc has been formed.
8700     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8701         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8702       // If we're looking for eq 0 instead of ne 0, we need to invert the
8703       // condition.
8704       bool Invert = CCVal == ISD::SETEQ;
8705       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8706       if (Invert)
8707         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8708 
8709       SDLoc DL(N);
8710       RHS = LHS.getOperand(1);
8711       LHS = LHS.getOperand(0);
8712       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8713 
8714       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8715                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8716                          N->getOperand(4));
8717     }
8718 
8719     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8720     //      (br_cc X, Y, eq/ne, trueV, falseV)
8721     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8722       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8723                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8724                          N->getOperand(3), N->getOperand(4));
8725 
8726     // (br_cc X, 1, setne, br_cc) ->
8727     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8728     // This can occur when legalizing some floating point comparisons.
8729     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8730     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8731       SDLoc DL(N);
8732       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8733       SDValue TargetCC = DAG.getCondCode(CCVal);
8734       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8735       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8736                          N->getOperand(0), LHS, RHS, TargetCC,
8737                          N->getOperand(4));
8738     }
8739     break;
8740   }
8741   case ISD::BITREVERSE:
8742     return performBITREVERSECombine(N, DAG, Subtarget);
8743   case ISD::FP_TO_SINT:
8744   case ISD::FP_TO_UINT:
8745     return performFP_TO_INTCombine(N, DCI, Subtarget);
8746   case ISD::FP_TO_SINT_SAT:
8747   case ISD::FP_TO_UINT_SAT:
8748     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8749   case ISD::FCOPYSIGN: {
8750     EVT VT = N->getValueType(0);
8751     if (!VT.isVector())
8752       break;
8753     // There is a form of VFSGNJ which injects the negated sign of its second
8754     // operand. Try and bubble any FNEG up after the extend/round to produce
8755     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8756     // TRUNC=1.
8757     SDValue In2 = N->getOperand(1);
8758     // Avoid cases where the extend/round has multiple uses, as duplicating
8759     // those is typically more expensive than removing a fneg.
8760     if (!In2.hasOneUse())
8761       break;
8762     if (In2.getOpcode() != ISD::FP_EXTEND &&
8763         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8764       break;
8765     In2 = In2.getOperand(0);
8766     if (In2.getOpcode() != ISD::FNEG)
8767       break;
8768     SDLoc DL(N);
8769     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8770     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8771                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8772   }
8773   case ISD::MGATHER:
8774   case ISD::MSCATTER:
8775   case ISD::VP_GATHER:
8776   case ISD::VP_SCATTER: {
8777     if (!DCI.isBeforeLegalize())
8778       break;
8779     SDValue Index, ScaleOp;
8780     bool IsIndexScaled = false;
8781     bool IsIndexSigned = false;
8782     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8783       Index = VPGSN->getIndex();
8784       ScaleOp = VPGSN->getScale();
8785       IsIndexScaled = VPGSN->isIndexScaled();
8786       IsIndexSigned = VPGSN->isIndexSigned();
8787     } else {
8788       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8789       Index = MGSN->getIndex();
8790       ScaleOp = MGSN->getScale();
8791       IsIndexScaled = MGSN->isIndexScaled();
8792       IsIndexSigned = MGSN->isIndexSigned();
8793     }
8794     EVT IndexVT = Index.getValueType();
8795     MVT XLenVT = Subtarget.getXLenVT();
8796     // RISCV indexed loads only support the "unsigned unscaled" addressing
8797     // mode, so anything else must be manually legalized.
8798     bool NeedsIdxLegalization =
8799         IsIndexScaled ||
8800         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8801     if (!NeedsIdxLegalization)
8802       break;
8803 
8804     SDLoc DL(N);
8805 
8806     // Any index legalization should first promote to XLenVT, so we don't lose
8807     // bits when scaling. This may create an illegal index type so we let
8808     // LLVM's legalization take care of the splitting.
8809     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8810     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8811       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8812       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8813                           DL, IndexVT, Index);
8814     }
8815 
8816     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8817     if (IsIndexScaled && Scale != 1) {
8818       // Manually scale the indices by the element size.
8819       // TODO: Sanitize the scale operand here?
8820       // TODO: For VP nodes, should we use VP_SHL here?
8821       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8822       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8823       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8824     }
8825 
8826     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8827     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8828       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8829                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8830                               VPGN->getScale(), VPGN->getMask(),
8831                               VPGN->getVectorLength()},
8832                              VPGN->getMemOperand(), NewIndexTy);
8833     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8834       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8835                               {VPSN->getChain(), VPSN->getValue(),
8836                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8837                                VPSN->getMask(), VPSN->getVectorLength()},
8838                               VPSN->getMemOperand(), NewIndexTy);
8839     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8840       return DAG.getMaskedGather(
8841           N->getVTList(), MGN->getMemoryVT(), DL,
8842           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8843            MGN->getBasePtr(), Index, MGN->getScale()},
8844           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8845     const auto *MSN = cast<MaskedScatterSDNode>(N);
8846     return DAG.getMaskedScatter(
8847         N->getVTList(), MSN->getMemoryVT(), DL,
8848         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8849          Index, MSN->getScale()},
8850         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8851   }
8852   case RISCVISD::SRA_VL:
8853   case RISCVISD::SRL_VL:
8854   case RISCVISD::SHL_VL: {
8855     SDValue ShAmt = N->getOperand(1);
8856     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8857       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8858       SDLoc DL(N);
8859       SDValue VL = N->getOperand(3);
8860       EVT VT = N->getValueType(0);
8861       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8862                           ShAmt.getOperand(1), VL);
8863       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8864                          N->getOperand(2), N->getOperand(3));
8865     }
8866     break;
8867   }
8868   case ISD::SRA:
8869   case ISD::SRL:
8870   case ISD::SHL: {
8871     SDValue ShAmt = N->getOperand(1);
8872     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8873       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8874       SDLoc DL(N);
8875       EVT VT = N->getValueType(0);
8876       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8877                           ShAmt.getOperand(1),
8878                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8879       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8880     }
8881     break;
8882   }
8883   case RISCVISD::ADD_VL:
8884     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8885       return V;
8886     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8887   case RISCVISD::SUB_VL:
8888     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8889   case RISCVISD::VWADD_W_VL:
8890   case RISCVISD::VWADDU_W_VL:
8891   case RISCVISD::VWSUB_W_VL:
8892   case RISCVISD::VWSUBU_W_VL:
8893     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8894   case RISCVISD::MUL_VL:
8895     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8896       return V;
8897     // Mul is commutative.
8898     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8899   case ISD::STORE: {
8900     auto *Store = cast<StoreSDNode>(N);
8901     SDValue Val = Store->getValue();
8902     // Combine store of vmv.x.s to vse with VL of 1.
8903     // FIXME: Support FP.
8904     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8905       SDValue Src = Val.getOperand(0);
8906       EVT VecVT = Src.getValueType();
8907       EVT MemVT = Store->getMemoryVT();
8908       // The memory VT and the element type must match.
8909       if (VecVT.getVectorElementType() == MemVT) {
8910         SDLoc DL(N);
8911         MVT MaskVT = getMaskTypeFor(VecVT);
8912         return DAG.getStoreVP(
8913             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8914             DAG.getConstant(1, DL, MaskVT),
8915             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8916             Store->getMemOperand(), Store->getAddressingMode(),
8917             Store->isTruncatingStore(), /*IsCompress*/ false);
8918       }
8919     }
8920 
8921     break;
8922   }
8923   case ISD::SPLAT_VECTOR: {
8924     EVT VT = N->getValueType(0);
8925     // Only perform this combine on legal MVT types.
8926     if (!isTypeLegal(VT))
8927       break;
8928     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8929                                          DAG, Subtarget))
8930       return Gather;
8931     break;
8932   }
8933   case RISCVISD::VMV_V_X_VL: {
8934     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8935     // scalar input.
8936     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8937     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8938     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8939       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8940         return SDValue(N, 0);
8941 
8942     break;
8943   }
8944   case ISD::INTRINSIC_WO_CHAIN: {
8945     unsigned IntNo = N->getConstantOperandVal(0);
8946     switch (IntNo) {
8947       // By default we do not combine any intrinsic.
8948     default:
8949       return SDValue();
8950     case Intrinsic::riscv_vcpop:
8951     case Intrinsic::riscv_vcpop_mask:
8952     case Intrinsic::riscv_vfirst:
8953     case Intrinsic::riscv_vfirst_mask: {
8954       SDValue VL = N->getOperand(2);
8955       if (IntNo == Intrinsic::riscv_vcpop_mask ||
8956           IntNo == Intrinsic::riscv_vfirst_mask)
8957         VL = N->getOperand(3);
8958       if (!isNullConstant(VL))
8959         return SDValue();
8960       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
8961       SDLoc DL(N);
8962       EVT VT = N->getValueType(0);
8963       if (IntNo == Intrinsic::riscv_vfirst ||
8964           IntNo == Intrinsic::riscv_vfirst_mask)
8965         return DAG.getConstant(-1, DL, VT);
8966       return DAG.getConstant(0, DL, VT);
8967     }
8968     }
8969   }
8970   }
8971 
8972   return SDValue();
8973 }
8974 
8975 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8976     const SDNode *N, CombineLevel Level) const {
8977   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8978   // materialised in fewer instructions than `(OP _, c1)`:
8979   //
8980   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8981   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8982   SDValue N0 = N->getOperand(0);
8983   EVT Ty = N0.getValueType();
8984   if (Ty.isScalarInteger() &&
8985       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8986     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8987     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8988     if (C1 && C2) {
8989       const APInt &C1Int = C1->getAPIntValue();
8990       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8991 
8992       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8993       // and the combine should happen, to potentially allow further combines
8994       // later.
8995       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8996           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8997         return true;
8998 
8999       // We can materialise `c1` in an add immediate, so it's "free", and the
9000       // combine should be prevented.
9001       if (C1Int.getMinSignedBits() <= 64 &&
9002           isLegalAddImmediate(C1Int.getSExtValue()))
9003         return false;
9004 
9005       // Neither constant will fit into an immediate, so find materialisation
9006       // costs.
9007       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9008                                               Subtarget.getFeatureBits(),
9009                                               /*CompressionCost*/true);
9010       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9011           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9012           /*CompressionCost*/true);
9013 
9014       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9015       // combine should be prevented.
9016       if (C1Cost < ShiftedC1Cost)
9017         return false;
9018     }
9019   }
9020   return true;
9021 }
9022 
9023 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9024     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9025     TargetLoweringOpt &TLO) const {
9026   // Delay this optimization as late as possible.
9027   if (!TLO.LegalOps)
9028     return false;
9029 
9030   EVT VT = Op.getValueType();
9031   if (VT.isVector())
9032     return false;
9033 
9034   // Only handle AND for now.
9035   if (Op.getOpcode() != ISD::AND)
9036     return false;
9037 
9038   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9039   if (!C)
9040     return false;
9041 
9042   const APInt &Mask = C->getAPIntValue();
9043 
9044   // Clear all non-demanded bits initially.
9045   APInt ShrunkMask = Mask & DemandedBits;
9046 
9047   // Try to make a smaller immediate by setting undemanded bits.
9048 
9049   APInt ExpandedMask = Mask | ~DemandedBits;
9050 
9051   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9052     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9053   };
9054   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
9055     if (NewMask == Mask)
9056       return true;
9057     SDLoc DL(Op);
9058     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
9059     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
9060     return TLO.CombineTo(Op, NewOp);
9061   };
9062 
9063   // If the shrunk mask fits in sign extended 12 bits, let the target
9064   // independent code apply it.
9065   if (ShrunkMask.isSignedIntN(12))
9066     return false;
9067 
9068   // Preserve (and X, 0xffff) when zext.h is supported.
9069   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9070     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9071     if (IsLegalMask(NewMask))
9072       return UseMask(NewMask);
9073   }
9074 
9075   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9076   if (VT == MVT::i64) {
9077     APInt NewMask = APInt(64, 0xffffffff);
9078     if (IsLegalMask(NewMask))
9079       return UseMask(NewMask);
9080   }
9081 
9082   // For the remaining optimizations, we need to be able to make a negative
9083   // number through a combination of mask and undemanded bits.
9084   if (!ExpandedMask.isNegative())
9085     return false;
9086 
9087   // What is the fewest number of bits we need to represent the negative number.
9088   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9089 
9090   // Try to make a 12 bit negative immediate. If that fails try to make a 32
9091   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
9092   APInt NewMask = ShrunkMask;
9093   if (MinSignedBits <= 12)
9094     NewMask.setBitsFrom(11);
9095   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
9096     NewMask.setBitsFrom(31);
9097   else
9098     return false;
9099 
9100   // Check that our new mask is a subset of the demanded mask.
9101   assert(IsLegalMask(NewMask));
9102   return UseMask(NewMask);
9103 }
9104 
9105 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9106   static const uint64_t GREVMasks[] = {
9107       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9108       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9109 
9110   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9111     unsigned Shift = 1 << Stage;
9112     if (ShAmt & Shift) {
9113       uint64_t Mask = GREVMasks[Stage];
9114       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9115       if (IsGORC)
9116         Res |= x;
9117       x = Res;
9118     }
9119   }
9120 
9121   return x;
9122 }
9123 
9124 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9125                                                         KnownBits &Known,
9126                                                         const APInt &DemandedElts,
9127                                                         const SelectionDAG &DAG,
9128                                                         unsigned Depth) const {
9129   unsigned BitWidth = Known.getBitWidth();
9130   unsigned Opc = Op.getOpcode();
9131   assert((Opc >= ISD::BUILTIN_OP_END ||
9132           Opc == ISD::INTRINSIC_WO_CHAIN ||
9133           Opc == ISD::INTRINSIC_W_CHAIN ||
9134           Opc == ISD::INTRINSIC_VOID) &&
9135          "Should use MaskedValueIsZero if you don't know whether Op"
9136          " is a target node!");
9137 
9138   Known.resetAll();
9139   switch (Opc) {
9140   default: break;
9141   case RISCVISD::SELECT_CC: {
9142     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9143     // If we don't know any bits, early out.
9144     if (Known.isUnknown())
9145       break;
9146     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9147 
9148     // Only known if known in both the LHS and RHS.
9149     Known = KnownBits::commonBits(Known, Known2);
9150     break;
9151   }
9152   case RISCVISD::REMUW: {
9153     KnownBits Known2;
9154     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9155     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9156     // We only care about the lower 32 bits.
9157     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9158     // Restore the original width by sign extending.
9159     Known = Known.sext(BitWidth);
9160     break;
9161   }
9162   case RISCVISD::DIVUW: {
9163     KnownBits Known2;
9164     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9165     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9166     // We only care about the lower 32 bits.
9167     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9168     // Restore the original width by sign extending.
9169     Known = Known.sext(BitWidth);
9170     break;
9171   }
9172   case RISCVISD::CTZW: {
9173     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9174     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9175     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9176     Known.Zero.setBitsFrom(LowBits);
9177     break;
9178   }
9179   case RISCVISD::CLZW: {
9180     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9181     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9182     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9183     Known.Zero.setBitsFrom(LowBits);
9184     break;
9185   }
9186   case RISCVISD::GREV:
9187   case RISCVISD::GORC: {
9188     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9189       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9190       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9191       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
9192       // To compute zeros, we need to invert the value and invert it back after.
9193       Known.Zero =
9194           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9195       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9196     }
9197     break;
9198   }
9199   case RISCVISD::READ_VLENB: {
9200     // If we know the minimum VLen from Zvl extensions, we can use that to
9201     // determine the trailing zeros of VLENB.
9202     // FIXME: Limit to 128 bit vectors until we have more testing.
9203     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
9204     if (MinVLenB > 0)
9205       Known.Zero.setLowBits(Log2_32(MinVLenB));
9206     // We assume VLENB is no more than 65536 / 8 bytes.
9207     Known.Zero.setBitsFrom(14);
9208     break;
9209   }
9210   case ISD::INTRINSIC_W_CHAIN:
9211   case ISD::INTRINSIC_WO_CHAIN: {
9212     unsigned IntNo =
9213         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9214     switch (IntNo) {
9215     default:
9216       // We can't do anything for most intrinsics.
9217       break;
9218     case Intrinsic::riscv_vsetvli:
9219     case Intrinsic::riscv_vsetvlimax:
9220     case Intrinsic::riscv_vsetvli_opt:
9221     case Intrinsic::riscv_vsetvlimax_opt:
9222       // Assume that VL output is positive and would fit in an int32_t.
9223       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9224       if (BitWidth >= 32)
9225         Known.Zero.setBitsFrom(31);
9226       break;
9227     }
9228     break;
9229   }
9230   }
9231 }
9232 
9233 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9234     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9235     unsigned Depth) const {
9236   switch (Op.getOpcode()) {
9237   default:
9238     break;
9239   case RISCVISD::SELECT_CC: {
9240     unsigned Tmp =
9241         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9242     if (Tmp == 1) return 1;  // Early out.
9243     unsigned Tmp2 =
9244         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9245     return std::min(Tmp, Tmp2);
9246   }
9247   case RISCVISD::SLLW:
9248   case RISCVISD::SRAW:
9249   case RISCVISD::SRLW:
9250   case RISCVISD::DIVW:
9251   case RISCVISD::DIVUW:
9252   case RISCVISD::REMUW:
9253   case RISCVISD::ROLW:
9254   case RISCVISD::RORW:
9255   case RISCVISD::GREVW:
9256   case RISCVISD::GORCW:
9257   case RISCVISD::FSLW:
9258   case RISCVISD::FSRW:
9259   case RISCVISD::SHFLW:
9260   case RISCVISD::UNSHFLW:
9261   case RISCVISD::BCOMPRESSW:
9262   case RISCVISD::BDECOMPRESSW:
9263   case RISCVISD::BFPW:
9264   case RISCVISD::FCVT_W_RV64:
9265   case RISCVISD::FCVT_WU_RV64:
9266   case RISCVISD::STRICT_FCVT_W_RV64:
9267   case RISCVISD::STRICT_FCVT_WU_RV64:
9268     // TODO: As the result is sign-extended, this is conservatively correct. A
9269     // more precise answer could be calculated for SRAW depending on known
9270     // bits in the shift amount.
9271     return 33;
9272   case RISCVISD::SHFL:
9273   case RISCVISD::UNSHFL: {
9274     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
9275     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9276     // will stay within the upper 32 bits. If there were more than 32 sign bits
9277     // before there will be at least 33 sign bits after.
9278     if (Op.getValueType() == MVT::i64 &&
9279         isa<ConstantSDNode>(Op.getOperand(1)) &&
9280         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9281       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9282       if (Tmp > 32)
9283         return 33;
9284     }
9285     break;
9286   }
9287   case RISCVISD::VMV_X_S: {
9288     // The number of sign bits of the scalar result is computed by obtaining the
9289     // element type of the input vector operand, subtracting its width from the
9290     // XLEN, and then adding one (sign bit within the element type). If the
9291     // element type is wider than XLen, the least-significant XLEN bits are
9292     // taken.
9293     unsigned XLen = Subtarget.getXLen();
9294     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9295     if (EltBits <= XLen)
9296       return XLen - EltBits + 1;
9297     break;
9298   }
9299   }
9300 
9301   return 1;
9302 }
9303 
9304 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9305                                                   MachineBasicBlock *BB) {
9306   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9307 
9308   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9309   // Should the count have wrapped while it was being read, we need to try
9310   // again.
9311   // ...
9312   // read:
9313   // rdcycleh x3 # load high word of cycle
9314   // rdcycle  x2 # load low word of cycle
9315   // rdcycleh x4 # load high word of cycle
9316   // bne x3, x4, read # check if high word reads match, otherwise try again
9317   // ...
9318 
9319   MachineFunction &MF = *BB->getParent();
9320   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9321   MachineFunction::iterator It = ++BB->getIterator();
9322 
9323   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9324   MF.insert(It, LoopMBB);
9325 
9326   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9327   MF.insert(It, DoneMBB);
9328 
9329   // Transfer the remainder of BB and its successor edges to DoneMBB.
9330   DoneMBB->splice(DoneMBB->begin(), BB,
9331                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9332   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9333 
9334   BB->addSuccessor(LoopMBB);
9335 
9336   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9337   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9338   Register LoReg = MI.getOperand(0).getReg();
9339   Register HiReg = MI.getOperand(1).getReg();
9340   DebugLoc DL = MI.getDebugLoc();
9341 
9342   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9343   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9344       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9345       .addReg(RISCV::X0);
9346   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9347       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9348       .addReg(RISCV::X0);
9349   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9350       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9351       .addReg(RISCV::X0);
9352 
9353   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9354       .addReg(HiReg)
9355       .addReg(ReadAgainReg)
9356       .addMBB(LoopMBB);
9357 
9358   LoopMBB->addSuccessor(LoopMBB);
9359   LoopMBB->addSuccessor(DoneMBB);
9360 
9361   MI.eraseFromParent();
9362 
9363   return DoneMBB;
9364 }
9365 
9366 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9367                                              MachineBasicBlock *BB) {
9368   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9369 
9370   MachineFunction &MF = *BB->getParent();
9371   DebugLoc DL = MI.getDebugLoc();
9372   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9373   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9374   Register LoReg = MI.getOperand(0).getReg();
9375   Register HiReg = MI.getOperand(1).getReg();
9376   Register SrcReg = MI.getOperand(2).getReg();
9377   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9378   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9379 
9380   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9381                           RI);
9382   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9383   MachineMemOperand *MMOLo =
9384       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9385   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9386       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9387   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9388       .addFrameIndex(FI)
9389       .addImm(0)
9390       .addMemOperand(MMOLo);
9391   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9392       .addFrameIndex(FI)
9393       .addImm(4)
9394       .addMemOperand(MMOHi);
9395   MI.eraseFromParent(); // The pseudo instruction is gone now.
9396   return BB;
9397 }
9398 
9399 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9400                                                  MachineBasicBlock *BB) {
9401   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9402          "Unexpected instruction");
9403 
9404   MachineFunction &MF = *BB->getParent();
9405   DebugLoc DL = MI.getDebugLoc();
9406   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9407   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9408   Register DstReg = MI.getOperand(0).getReg();
9409   Register LoReg = MI.getOperand(1).getReg();
9410   Register HiReg = MI.getOperand(2).getReg();
9411   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9412   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9413 
9414   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9415   MachineMemOperand *MMOLo =
9416       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9417   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9418       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9419   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9420       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9421       .addFrameIndex(FI)
9422       .addImm(0)
9423       .addMemOperand(MMOLo);
9424   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9425       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9426       .addFrameIndex(FI)
9427       .addImm(4)
9428       .addMemOperand(MMOHi);
9429   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9430   MI.eraseFromParent(); // The pseudo instruction is gone now.
9431   return BB;
9432 }
9433 
9434 static bool isSelectPseudo(MachineInstr &MI) {
9435   switch (MI.getOpcode()) {
9436   default:
9437     return false;
9438   case RISCV::Select_GPR_Using_CC_GPR:
9439   case RISCV::Select_FPR16_Using_CC_GPR:
9440   case RISCV::Select_FPR32_Using_CC_GPR:
9441   case RISCV::Select_FPR64_Using_CC_GPR:
9442     return true;
9443   }
9444 }
9445 
9446 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9447                                         unsigned RelOpcode, unsigned EqOpcode,
9448                                         const RISCVSubtarget &Subtarget) {
9449   DebugLoc DL = MI.getDebugLoc();
9450   Register DstReg = MI.getOperand(0).getReg();
9451   Register Src1Reg = MI.getOperand(1).getReg();
9452   Register Src2Reg = MI.getOperand(2).getReg();
9453   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9454   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9455   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9456 
9457   // Save the current FFLAGS.
9458   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9459 
9460   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9461                  .addReg(Src1Reg)
9462                  .addReg(Src2Reg);
9463   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9464     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9465 
9466   // Restore the FFLAGS.
9467   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9468       .addReg(SavedFFlags, RegState::Kill);
9469 
9470   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9471   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9472                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9473                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9474   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9475     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9476 
9477   // Erase the pseudoinstruction.
9478   MI.eraseFromParent();
9479   return BB;
9480 }
9481 
9482 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9483                                            MachineBasicBlock *BB,
9484                                            const RISCVSubtarget &Subtarget) {
9485   // To "insert" Select_* instructions, we actually have to insert the triangle
9486   // control-flow pattern.  The incoming instructions know the destination vreg
9487   // to set, the condition code register to branch on, the true/false values to
9488   // select between, and the condcode to use to select the appropriate branch.
9489   //
9490   // We produce the following control flow:
9491   //     HeadMBB
9492   //     |  \
9493   //     |  IfFalseMBB
9494   //     | /
9495   //    TailMBB
9496   //
9497   // When we find a sequence of selects we attempt to optimize their emission
9498   // by sharing the control flow. Currently we only handle cases where we have
9499   // multiple selects with the exact same condition (same LHS, RHS and CC).
9500   // The selects may be interleaved with other instructions if the other
9501   // instructions meet some requirements we deem safe:
9502   // - They are debug instructions. Otherwise,
9503   // - They do not have side-effects, do not access memory and their inputs do
9504   //   not depend on the results of the select pseudo-instructions.
9505   // The TrueV/FalseV operands of the selects cannot depend on the result of
9506   // previous selects in the sequence.
9507   // These conditions could be further relaxed. See the X86 target for a
9508   // related approach and more information.
9509   Register LHS = MI.getOperand(1).getReg();
9510   Register RHS = MI.getOperand(2).getReg();
9511   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9512 
9513   SmallVector<MachineInstr *, 4> SelectDebugValues;
9514   SmallSet<Register, 4> SelectDests;
9515   SelectDests.insert(MI.getOperand(0).getReg());
9516 
9517   MachineInstr *LastSelectPseudo = &MI;
9518 
9519   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9520        SequenceMBBI != E; ++SequenceMBBI) {
9521     if (SequenceMBBI->isDebugInstr())
9522       continue;
9523     if (isSelectPseudo(*SequenceMBBI)) {
9524       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9525           SequenceMBBI->getOperand(2).getReg() != RHS ||
9526           SequenceMBBI->getOperand(3).getImm() != CC ||
9527           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9528           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9529         break;
9530       LastSelectPseudo = &*SequenceMBBI;
9531       SequenceMBBI->collectDebugValues(SelectDebugValues);
9532       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9533     } else {
9534       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9535           SequenceMBBI->mayLoadOrStore())
9536         break;
9537       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9538             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9539           }))
9540         break;
9541     }
9542   }
9543 
9544   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9545   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9546   DebugLoc DL = MI.getDebugLoc();
9547   MachineFunction::iterator I = ++BB->getIterator();
9548 
9549   MachineBasicBlock *HeadMBB = BB;
9550   MachineFunction *F = BB->getParent();
9551   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9552   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9553 
9554   F->insert(I, IfFalseMBB);
9555   F->insert(I, TailMBB);
9556 
9557   // Transfer debug instructions associated with the selects to TailMBB.
9558   for (MachineInstr *DebugInstr : SelectDebugValues) {
9559     TailMBB->push_back(DebugInstr->removeFromParent());
9560   }
9561 
9562   // Move all instructions after the sequence to TailMBB.
9563   TailMBB->splice(TailMBB->end(), HeadMBB,
9564                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9565   // Update machine-CFG edges by transferring all successors of the current
9566   // block to the new block which will contain the Phi nodes for the selects.
9567   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9568   // Set the successors for HeadMBB.
9569   HeadMBB->addSuccessor(IfFalseMBB);
9570   HeadMBB->addSuccessor(TailMBB);
9571 
9572   // Insert appropriate branch.
9573   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9574     .addReg(LHS)
9575     .addReg(RHS)
9576     .addMBB(TailMBB);
9577 
9578   // IfFalseMBB just falls through to TailMBB.
9579   IfFalseMBB->addSuccessor(TailMBB);
9580 
9581   // Create PHIs for all of the select pseudo-instructions.
9582   auto SelectMBBI = MI.getIterator();
9583   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9584   auto InsertionPoint = TailMBB->begin();
9585   while (SelectMBBI != SelectEnd) {
9586     auto Next = std::next(SelectMBBI);
9587     if (isSelectPseudo(*SelectMBBI)) {
9588       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9589       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9590               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9591           .addReg(SelectMBBI->getOperand(4).getReg())
9592           .addMBB(HeadMBB)
9593           .addReg(SelectMBBI->getOperand(5).getReg())
9594           .addMBB(IfFalseMBB);
9595       SelectMBBI->eraseFromParent();
9596     }
9597     SelectMBBI = Next;
9598   }
9599 
9600   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9601   return TailMBB;
9602 }
9603 
9604 MachineBasicBlock *
9605 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9606                                                  MachineBasicBlock *BB) const {
9607   switch (MI.getOpcode()) {
9608   default:
9609     llvm_unreachable("Unexpected instr type to insert");
9610   case RISCV::ReadCycleWide:
9611     assert(!Subtarget.is64Bit() &&
9612            "ReadCycleWrite is only to be used on riscv32");
9613     return emitReadCycleWidePseudo(MI, BB);
9614   case RISCV::Select_GPR_Using_CC_GPR:
9615   case RISCV::Select_FPR16_Using_CC_GPR:
9616   case RISCV::Select_FPR32_Using_CC_GPR:
9617   case RISCV::Select_FPR64_Using_CC_GPR:
9618     return emitSelectPseudo(MI, BB, Subtarget);
9619   case RISCV::BuildPairF64Pseudo:
9620     return emitBuildPairF64Pseudo(MI, BB);
9621   case RISCV::SplitF64Pseudo:
9622     return emitSplitF64Pseudo(MI, BB);
9623   case RISCV::PseudoQuietFLE_H:
9624     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9625   case RISCV::PseudoQuietFLT_H:
9626     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9627   case RISCV::PseudoQuietFLE_S:
9628     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9629   case RISCV::PseudoQuietFLT_S:
9630     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9631   case RISCV::PseudoQuietFLE_D:
9632     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9633   case RISCV::PseudoQuietFLT_D:
9634     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9635   }
9636 }
9637 
9638 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9639                                                         SDNode *Node) const {
9640   // Add FRM dependency to any instructions with dynamic rounding mode.
9641   unsigned Opc = MI.getOpcode();
9642   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9643   if (Idx < 0)
9644     return;
9645   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9646     return;
9647   // If the instruction already reads FRM, don't add another read.
9648   if (MI.readsRegister(RISCV::FRM))
9649     return;
9650   MI.addOperand(
9651       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9652 }
9653 
9654 // Calling Convention Implementation.
9655 // The expectations for frontend ABI lowering vary from target to target.
9656 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9657 // details, but this is a longer term goal. For now, we simply try to keep the
9658 // role of the frontend as simple and well-defined as possible. The rules can
9659 // be summarised as:
9660 // * Never split up large scalar arguments. We handle them here.
9661 // * If a hardfloat calling convention is being used, and the struct may be
9662 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9663 // available, then pass as two separate arguments. If either the GPRs or FPRs
9664 // are exhausted, then pass according to the rule below.
9665 // * If a struct could never be passed in registers or directly in a stack
9666 // slot (as it is larger than 2*XLEN and the floating point rules don't
9667 // apply), then pass it using a pointer with the byval attribute.
9668 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9669 // word-sized array or a 2*XLEN scalar (depending on alignment).
9670 // * The frontend can determine whether a struct is returned by reference or
9671 // not based on its size and fields. If it will be returned by reference, the
9672 // frontend must modify the prototype so a pointer with the sret annotation is
9673 // passed as the first argument. This is not necessary for large scalar
9674 // returns.
9675 // * Struct return values and varargs should be coerced to structs containing
9676 // register-size fields in the same situations they would be for fixed
9677 // arguments.
9678 
9679 static const MCPhysReg ArgGPRs[] = {
9680   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9681   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9682 };
9683 static const MCPhysReg ArgFPR16s[] = {
9684   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9685   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9686 };
9687 static const MCPhysReg ArgFPR32s[] = {
9688   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9689   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9690 };
9691 static const MCPhysReg ArgFPR64s[] = {
9692   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9693   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9694 };
9695 // This is an interim calling convention and it may be changed in the future.
9696 static const MCPhysReg ArgVRs[] = {
9697     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9698     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9699     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9700 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9701                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9702                                      RISCV::V20M2, RISCV::V22M2};
9703 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9704                                      RISCV::V20M4};
9705 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9706 
9707 // Pass a 2*XLEN argument that has been split into two XLEN values through
9708 // registers or the stack as necessary.
9709 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9710                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9711                                 MVT ValVT2, MVT LocVT2,
9712                                 ISD::ArgFlagsTy ArgFlags2) {
9713   unsigned XLenInBytes = XLen / 8;
9714   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9715     // At least one half can be passed via register.
9716     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9717                                      VA1.getLocVT(), CCValAssign::Full));
9718   } else {
9719     // Both halves must be passed on the stack, with proper alignment.
9720     Align StackAlign =
9721         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9722     State.addLoc(
9723         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9724                             State.AllocateStack(XLenInBytes, StackAlign),
9725                             VA1.getLocVT(), CCValAssign::Full));
9726     State.addLoc(CCValAssign::getMem(
9727         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9728         LocVT2, CCValAssign::Full));
9729     return false;
9730   }
9731 
9732   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9733     // The second half can also be passed via register.
9734     State.addLoc(
9735         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9736   } else {
9737     // The second half is passed via the stack, without additional alignment.
9738     State.addLoc(CCValAssign::getMem(
9739         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9740         LocVT2, CCValAssign::Full));
9741   }
9742 
9743   return false;
9744 }
9745 
9746 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9747                                Optional<unsigned> FirstMaskArgument,
9748                                CCState &State, const RISCVTargetLowering &TLI) {
9749   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9750   if (RC == &RISCV::VRRegClass) {
9751     // Assign the first mask argument to V0.
9752     // This is an interim calling convention and it may be changed in the
9753     // future.
9754     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9755       return State.AllocateReg(RISCV::V0);
9756     return State.AllocateReg(ArgVRs);
9757   }
9758   if (RC == &RISCV::VRM2RegClass)
9759     return State.AllocateReg(ArgVRM2s);
9760   if (RC == &RISCV::VRM4RegClass)
9761     return State.AllocateReg(ArgVRM4s);
9762   if (RC == &RISCV::VRM8RegClass)
9763     return State.AllocateReg(ArgVRM8s);
9764   llvm_unreachable("Unhandled register class for ValueType");
9765 }
9766 
9767 // Implements the RISC-V calling convention. Returns true upon failure.
9768 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9769                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9770                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9771                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9772                      Optional<unsigned> FirstMaskArgument) {
9773   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9774   assert(XLen == 32 || XLen == 64);
9775   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9776 
9777   // Any return value split in to more than two values can't be returned
9778   // directly. Vectors are returned via the available vector registers.
9779   if (!LocVT.isVector() && IsRet && ValNo > 1)
9780     return true;
9781 
9782   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9783   // variadic argument, or if no F16/F32 argument registers are available.
9784   bool UseGPRForF16_F32 = true;
9785   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9786   // variadic argument, or if no F64 argument registers are available.
9787   bool UseGPRForF64 = true;
9788 
9789   switch (ABI) {
9790   default:
9791     llvm_unreachable("Unexpected ABI");
9792   case RISCVABI::ABI_ILP32:
9793   case RISCVABI::ABI_LP64:
9794     break;
9795   case RISCVABI::ABI_ILP32F:
9796   case RISCVABI::ABI_LP64F:
9797     UseGPRForF16_F32 = !IsFixed;
9798     break;
9799   case RISCVABI::ABI_ILP32D:
9800   case RISCVABI::ABI_LP64D:
9801     UseGPRForF16_F32 = !IsFixed;
9802     UseGPRForF64 = !IsFixed;
9803     break;
9804   }
9805 
9806   // FPR16, FPR32, and FPR64 alias each other.
9807   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9808     UseGPRForF16_F32 = true;
9809     UseGPRForF64 = true;
9810   }
9811 
9812   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9813   // similar local variables rather than directly checking against the target
9814   // ABI.
9815 
9816   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9817     LocVT = XLenVT;
9818     LocInfo = CCValAssign::BCvt;
9819   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9820     LocVT = MVT::i64;
9821     LocInfo = CCValAssign::BCvt;
9822   }
9823 
9824   // If this is a variadic argument, the RISC-V calling convention requires
9825   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9826   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9827   // be used regardless of whether the original argument was split during
9828   // legalisation or not. The argument will not be passed by registers if the
9829   // original type is larger than 2*XLEN, so the register alignment rule does
9830   // not apply.
9831   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9832   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9833       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9834     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9835     // Skip 'odd' register if necessary.
9836     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9837       State.AllocateReg(ArgGPRs);
9838   }
9839 
9840   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9841   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9842       State.getPendingArgFlags();
9843 
9844   assert(PendingLocs.size() == PendingArgFlags.size() &&
9845          "PendingLocs and PendingArgFlags out of sync");
9846 
9847   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9848   // registers are exhausted.
9849   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9850     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9851            "Can't lower f64 if it is split");
9852     // Depending on available argument GPRS, f64 may be passed in a pair of
9853     // GPRs, split between a GPR and the stack, or passed completely on the
9854     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9855     // cases.
9856     Register Reg = State.AllocateReg(ArgGPRs);
9857     LocVT = MVT::i32;
9858     if (!Reg) {
9859       unsigned StackOffset = State.AllocateStack(8, Align(8));
9860       State.addLoc(
9861           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9862       return false;
9863     }
9864     if (!State.AllocateReg(ArgGPRs))
9865       State.AllocateStack(4, Align(4));
9866     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9867     return false;
9868   }
9869 
9870   // Fixed-length vectors are located in the corresponding scalable-vector
9871   // container types.
9872   if (ValVT.isFixedLengthVector())
9873     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9874 
9875   // Split arguments might be passed indirectly, so keep track of the pending
9876   // values. Split vectors are passed via a mix of registers and indirectly, so
9877   // treat them as we would any other argument.
9878   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9879     LocVT = XLenVT;
9880     LocInfo = CCValAssign::Indirect;
9881     PendingLocs.push_back(
9882         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9883     PendingArgFlags.push_back(ArgFlags);
9884     if (!ArgFlags.isSplitEnd()) {
9885       return false;
9886     }
9887   }
9888 
9889   // If the split argument only had two elements, it should be passed directly
9890   // in registers or on the stack.
9891   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9892       PendingLocs.size() <= 2) {
9893     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9894     // Apply the normal calling convention rules to the first half of the
9895     // split argument.
9896     CCValAssign VA = PendingLocs[0];
9897     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9898     PendingLocs.clear();
9899     PendingArgFlags.clear();
9900     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9901                                ArgFlags);
9902   }
9903 
9904   // Allocate to a register if possible, or else a stack slot.
9905   Register Reg;
9906   unsigned StoreSizeBytes = XLen / 8;
9907   Align StackAlign = Align(XLen / 8);
9908 
9909   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9910     Reg = State.AllocateReg(ArgFPR16s);
9911   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9912     Reg = State.AllocateReg(ArgFPR32s);
9913   else if (ValVT == MVT::f64 && !UseGPRForF64)
9914     Reg = State.AllocateReg(ArgFPR64s);
9915   else if (ValVT.isVector()) {
9916     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9917     if (!Reg) {
9918       // For return values, the vector must be passed fully via registers or
9919       // via the stack.
9920       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9921       // but we're using all of them.
9922       if (IsRet)
9923         return true;
9924       // Try using a GPR to pass the address
9925       if ((Reg = State.AllocateReg(ArgGPRs))) {
9926         LocVT = XLenVT;
9927         LocInfo = CCValAssign::Indirect;
9928       } else if (ValVT.isScalableVector()) {
9929         LocVT = XLenVT;
9930         LocInfo = CCValAssign::Indirect;
9931       } else {
9932         // Pass fixed-length vectors on the stack.
9933         LocVT = ValVT;
9934         StoreSizeBytes = ValVT.getStoreSize();
9935         // Align vectors to their element sizes, being careful for vXi1
9936         // vectors.
9937         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9938       }
9939     }
9940   } else {
9941     Reg = State.AllocateReg(ArgGPRs);
9942   }
9943 
9944   unsigned StackOffset =
9945       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9946 
9947   // If we reach this point and PendingLocs is non-empty, we must be at the
9948   // end of a split argument that must be passed indirectly.
9949   if (!PendingLocs.empty()) {
9950     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9951     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9952 
9953     for (auto &It : PendingLocs) {
9954       if (Reg)
9955         It.convertToReg(Reg);
9956       else
9957         It.convertToMem(StackOffset);
9958       State.addLoc(It);
9959     }
9960     PendingLocs.clear();
9961     PendingArgFlags.clear();
9962     return false;
9963   }
9964 
9965   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9966           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9967          "Expected an XLenVT or vector types at this stage");
9968 
9969   if (Reg) {
9970     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9971     return false;
9972   }
9973 
9974   // When a floating-point value is passed on the stack, no bit-conversion is
9975   // needed.
9976   if (ValVT.isFloatingPoint()) {
9977     LocVT = ValVT;
9978     LocInfo = CCValAssign::Full;
9979   }
9980   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9981   return false;
9982 }
9983 
9984 template <typename ArgTy>
9985 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9986   for (const auto &ArgIdx : enumerate(Args)) {
9987     MVT ArgVT = ArgIdx.value().VT;
9988     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9989       return ArgIdx.index();
9990   }
9991   return None;
9992 }
9993 
9994 void RISCVTargetLowering::analyzeInputArgs(
9995     MachineFunction &MF, CCState &CCInfo,
9996     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9997     RISCVCCAssignFn Fn) const {
9998   unsigned NumArgs = Ins.size();
9999   FunctionType *FType = MF.getFunction().getFunctionType();
10000 
10001   Optional<unsigned> FirstMaskArgument;
10002   if (Subtarget.hasVInstructions())
10003     FirstMaskArgument = preAssignMask(Ins);
10004 
10005   for (unsigned i = 0; i != NumArgs; ++i) {
10006     MVT ArgVT = Ins[i].VT;
10007     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10008 
10009     Type *ArgTy = nullptr;
10010     if (IsRet)
10011       ArgTy = FType->getReturnType();
10012     else if (Ins[i].isOrigArg())
10013       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10014 
10015     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10016     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10017            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10018            FirstMaskArgument)) {
10019       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10020                         << EVT(ArgVT).getEVTString() << '\n');
10021       llvm_unreachable(nullptr);
10022     }
10023   }
10024 }
10025 
10026 void RISCVTargetLowering::analyzeOutputArgs(
10027     MachineFunction &MF, CCState &CCInfo,
10028     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10029     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10030   unsigned NumArgs = Outs.size();
10031 
10032   Optional<unsigned> FirstMaskArgument;
10033   if (Subtarget.hasVInstructions())
10034     FirstMaskArgument = preAssignMask(Outs);
10035 
10036   for (unsigned i = 0; i != NumArgs; i++) {
10037     MVT ArgVT = Outs[i].VT;
10038     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10039     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10040 
10041     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10042     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10043            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10044            FirstMaskArgument)) {
10045       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10046                         << EVT(ArgVT).getEVTString() << "\n");
10047       llvm_unreachable(nullptr);
10048     }
10049   }
10050 }
10051 
10052 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
10053 // values.
10054 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10055                                    const CCValAssign &VA, const SDLoc &DL,
10056                                    const RISCVSubtarget &Subtarget) {
10057   switch (VA.getLocInfo()) {
10058   default:
10059     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10060   case CCValAssign::Full:
10061     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10062       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10063     break;
10064   case CCValAssign::BCvt:
10065     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10066       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10067     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10068       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10069     else
10070       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10071     break;
10072   }
10073   return Val;
10074 }
10075 
10076 // The caller is responsible for loading the full value if the argument is
10077 // passed with CCValAssign::Indirect.
10078 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10079                                 const CCValAssign &VA, const SDLoc &DL,
10080                                 const RISCVTargetLowering &TLI) {
10081   MachineFunction &MF = DAG.getMachineFunction();
10082   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10083   EVT LocVT = VA.getLocVT();
10084   SDValue Val;
10085   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10086   Register VReg = RegInfo.createVirtualRegister(RC);
10087   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10088   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10089 
10090   if (VA.getLocInfo() == CCValAssign::Indirect)
10091     return Val;
10092 
10093   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10094 }
10095 
10096 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10097                                    const CCValAssign &VA, const SDLoc &DL,
10098                                    const RISCVSubtarget &Subtarget) {
10099   EVT LocVT = VA.getLocVT();
10100 
10101   switch (VA.getLocInfo()) {
10102   default:
10103     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10104   case CCValAssign::Full:
10105     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10106       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10107     break;
10108   case CCValAssign::BCvt:
10109     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10110       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10111     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10112       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10113     else
10114       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10115     break;
10116   }
10117   return Val;
10118 }
10119 
10120 // The caller is responsible for loading the full value if the argument is
10121 // passed with CCValAssign::Indirect.
10122 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10123                                 const CCValAssign &VA, const SDLoc &DL) {
10124   MachineFunction &MF = DAG.getMachineFunction();
10125   MachineFrameInfo &MFI = MF.getFrameInfo();
10126   EVT LocVT = VA.getLocVT();
10127   EVT ValVT = VA.getValVT();
10128   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10129   if (ValVT.isScalableVector()) {
10130     // When the value is a scalable vector, we save the pointer which points to
10131     // the scalable vector value in the stack. The ValVT will be the pointer
10132     // type, instead of the scalable vector type.
10133     ValVT = LocVT;
10134   }
10135   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10136                                  /*IsImmutable=*/true);
10137   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10138   SDValue Val;
10139 
10140   ISD::LoadExtType ExtType;
10141   switch (VA.getLocInfo()) {
10142   default:
10143     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10144   case CCValAssign::Full:
10145   case CCValAssign::Indirect:
10146   case CCValAssign::BCvt:
10147     ExtType = ISD::NON_EXTLOAD;
10148     break;
10149   }
10150   Val = DAG.getExtLoad(
10151       ExtType, DL, LocVT, Chain, FIN,
10152       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10153   return Val;
10154 }
10155 
10156 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10157                                        const CCValAssign &VA, const SDLoc &DL) {
10158   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10159          "Unexpected VA");
10160   MachineFunction &MF = DAG.getMachineFunction();
10161   MachineFrameInfo &MFI = MF.getFrameInfo();
10162   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10163 
10164   if (VA.isMemLoc()) {
10165     // f64 is passed on the stack.
10166     int FI =
10167         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10168     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10169     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10170                        MachinePointerInfo::getFixedStack(MF, FI));
10171   }
10172 
10173   assert(VA.isRegLoc() && "Expected register VA assignment");
10174 
10175   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10176   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10177   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10178   SDValue Hi;
10179   if (VA.getLocReg() == RISCV::X17) {
10180     // Second half of f64 is passed on the stack.
10181     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10182     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10183     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10184                      MachinePointerInfo::getFixedStack(MF, FI));
10185   } else {
10186     // Second half of f64 is passed in another GPR.
10187     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10188     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10189     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10190   }
10191   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10192 }
10193 
10194 // FastCC has less than 1% performance improvement for some particular
10195 // benchmark. But theoretically, it may has benenfit for some cases.
10196 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10197                             unsigned ValNo, MVT ValVT, MVT LocVT,
10198                             CCValAssign::LocInfo LocInfo,
10199                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10200                             bool IsFixed, bool IsRet, Type *OrigTy,
10201                             const RISCVTargetLowering &TLI,
10202                             Optional<unsigned> FirstMaskArgument) {
10203 
10204   // X5 and X6 might be used for save-restore libcall.
10205   static const MCPhysReg GPRList[] = {
10206       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10207       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10208       RISCV::X29, RISCV::X30, RISCV::X31};
10209 
10210   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10211     if (unsigned Reg = State.AllocateReg(GPRList)) {
10212       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10213       return false;
10214     }
10215   }
10216 
10217   if (LocVT == MVT::f16) {
10218     static const MCPhysReg FPR16List[] = {
10219         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10220         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10221         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10222         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10223     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10224       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10225       return false;
10226     }
10227   }
10228 
10229   if (LocVT == MVT::f32) {
10230     static const MCPhysReg FPR32List[] = {
10231         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10232         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10233         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10234         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10235     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10236       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10237       return false;
10238     }
10239   }
10240 
10241   if (LocVT == MVT::f64) {
10242     static const MCPhysReg FPR64List[] = {
10243         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10244         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10245         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10246         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10247     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10248       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10249       return false;
10250     }
10251   }
10252 
10253   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10254     unsigned Offset4 = State.AllocateStack(4, Align(4));
10255     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10256     return false;
10257   }
10258 
10259   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10260     unsigned Offset5 = State.AllocateStack(8, Align(8));
10261     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10262     return false;
10263   }
10264 
10265   if (LocVT.isVector()) {
10266     if (unsigned Reg =
10267             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10268       // Fixed-length vectors are located in the corresponding scalable-vector
10269       // container types.
10270       if (ValVT.isFixedLengthVector())
10271         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10272       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10273     } else {
10274       // Try and pass the address via a "fast" GPR.
10275       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10276         LocInfo = CCValAssign::Indirect;
10277         LocVT = TLI.getSubtarget().getXLenVT();
10278         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10279       } else if (ValVT.isFixedLengthVector()) {
10280         auto StackAlign =
10281             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10282         unsigned StackOffset =
10283             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10284         State.addLoc(
10285             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10286       } else {
10287         // Can't pass scalable vectors on the stack.
10288         return true;
10289       }
10290     }
10291 
10292     return false;
10293   }
10294 
10295   return true; // CC didn't match.
10296 }
10297 
10298 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10299                          CCValAssign::LocInfo LocInfo,
10300                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10301 
10302   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10303     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10304     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10305     static const MCPhysReg GPRList[] = {
10306         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10307         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10308     if (unsigned Reg = State.AllocateReg(GPRList)) {
10309       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10310       return false;
10311     }
10312   }
10313 
10314   if (LocVT == MVT::f32) {
10315     // Pass in STG registers: F1, ..., F6
10316     //                        fs0 ... fs5
10317     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10318                                           RISCV::F18_F, RISCV::F19_F,
10319                                           RISCV::F20_F, RISCV::F21_F};
10320     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10321       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10322       return false;
10323     }
10324   }
10325 
10326   if (LocVT == MVT::f64) {
10327     // Pass in STG registers: D1, ..., D6
10328     //                        fs6 ... fs11
10329     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10330                                           RISCV::F24_D, RISCV::F25_D,
10331                                           RISCV::F26_D, RISCV::F27_D};
10332     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10333       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10334       return false;
10335     }
10336   }
10337 
10338   report_fatal_error("No registers left in GHC calling convention");
10339   return true;
10340 }
10341 
10342 // Transform physical registers into virtual registers.
10343 SDValue RISCVTargetLowering::LowerFormalArguments(
10344     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10345     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10346     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10347 
10348   MachineFunction &MF = DAG.getMachineFunction();
10349 
10350   switch (CallConv) {
10351   default:
10352     report_fatal_error("Unsupported calling convention");
10353   case CallingConv::C:
10354   case CallingConv::Fast:
10355     break;
10356   case CallingConv::GHC:
10357     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10358         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10359       report_fatal_error(
10360         "GHC calling convention requires the F and D instruction set extensions");
10361   }
10362 
10363   const Function &Func = MF.getFunction();
10364   if (Func.hasFnAttribute("interrupt")) {
10365     if (!Func.arg_empty())
10366       report_fatal_error(
10367         "Functions with the interrupt attribute cannot have arguments!");
10368 
10369     StringRef Kind =
10370       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10371 
10372     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10373       report_fatal_error(
10374         "Function interrupt attribute argument not supported!");
10375   }
10376 
10377   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10378   MVT XLenVT = Subtarget.getXLenVT();
10379   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10380   // Used with vargs to acumulate store chains.
10381   std::vector<SDValue> OutChains;
10382 
10383   // Assign locations to all of the incoming arguments.
10384   SmallVector<CCValAssign, 16> ArgLocs;
10385   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10386 
10387   if (CallConv == CallingConv::GHC)
10388     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10389   else
10390     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10391                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10392                                                    : CC_RISCV);
10393 
10394   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10395     CCValAssign &VA = ArgLocs[i];
10396     SDValue ArgValue;
10397     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10398     // case.
10399     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10400       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10401     else if (VA.isRegLoc())
10402       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10403     else
10404       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10405 
10406     if (VA.getLocInfo() == CCValAssign::Indirect) {
10407       // If the original argument was split and passed by reference (e.g. i128
10408       // on RV32), we need to load all parts of it here (using the same
10409       // address). Vectors may be partly split to registers and partly to the
10410       // stack, in which case the base address is partly offset and subsequent
10411       // stores are relative to that.
10412       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10413                                    MachinePointerInfo()));
10414       unsigned ArgIndex = Ins[i].OrigArgIndex;
10415       unsigned ArgPartOffset = Ins[i].PartOffset;
10416       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10417       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10418         CCValAssign &PartVA = ArgLocs[i + 1];
10419         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10420         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10421         if (PartVA.getValVT().isScalableVector())
10422           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10423         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10424         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10425                                      MachinePointerInfo()));
10426         ++i;
10427       }
10428       continue;
10429     }
10430     InVals.push_back(ArgValue);
10431   }
10432 
10433   if (IsVarArg) {
10434     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10435     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10436     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10437     MachineFrameInfo &MFI = MF.getFrameInfo();
10438     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10439     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10440 
10441     // Offset of the first variable argument from stack pointer, and size of
10442     // the vararg save area. For now, the varargs save area is either zero or
10443     // large enough to hold a0-a7.
10444     int VaArgOffset, VarArgsSaveSize;
10445 
10446     // If all registers are allocated, then all varargs must be passed on the
10447     // stack and we don't need to save any argregs.
10448     if (ArgRegs.size() == Idx) {
10449       VaArgOffset = CCInfo.getNextStackOffset();
10450       VarArgsSaveSize = 0;
10451     } else {
10452       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10453       VaArgOffset = -VarArgsSaveSize;
10454     }
10455 
10456     // Record the frame index of the first variable argument
10457     // which is a value necessary to VASTART.
10458     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10459     RVFI->setVarArgsFrameIndex(FI);
10460 
10461     // If saving an odd number of registers then create an extra stack slot to
10462     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10463     // offsets to even-numbered registered remain 2*XLEN-aligned.
10464     if (Idx % 2) {
10465       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10466       VarArgsSaveSize += XLenInBytes;
10467     }
10468 
10469     // Copy the integer registers that may have been used for passing varargs
10470     // to the vararg save area.
10471     for (unsigned I = Idx; I < ArgRegs.size();
10472          ++I, VaArgOffset += XLenInBytes) {
10473       const Register Reg = RegInfo.createVirtualRegister(RC);
10474       RegInfo.addLiveIn(ArgRegs[I], Reg);
10475       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10476       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10477       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10478       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10479                                    MachinePointerInfo::getFixedStack(MF, FI));
10480       cast<StoreSDNode>(Store.getNode())
10481           ->getMemOperand()
10482           ->setValue((Value *)nullptr);
10483       OutChains.push_back(Store);
10484     }
10485     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10486   }
10487 
10488   // All stores are grouped in one node to allow the matching between
10489   // the size of Ins and InVals. This only happens for vararg functions.
10490   if (!OutChains.empty()) {
10491     OutChains.push_back(Chain);
10492     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10493   }
10494 
10495   return Chain;
10496 }
10497 
10498 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10499 /// for tail call optimization.
10500 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10501 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10502     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10503     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10504 
10505   auto &Callee = CLI.Callee;
10506   auto CalleeCC = CLI.CallConv;
10507   auto &Outs = CLI.Outs;
10508   auto &Caller = MF.getFunction();
10509   auto CallerCC = Caller.getCallingConv();
10510 
10511   // Exception-handling functions need a special set of instructions to
10512   // indicate a return to the hardware. Tail-calling another function would
10513   // probably break this.
10514   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10515   // should be expanded as new function attributes are introduced.
10516   if (Caller.hasFnAttribute("interrupt"))
10517     return false;
10518 
10519   // Do not tail call opt if the stack is used to pass parameters.
10520   if (CCInfo.getNextStackOffset() != 0)
10521     return false;
10522 
10523   // Do not tail call opt if any parameters need to be passed indirectly.
10524   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10525   // passed indirectly. So the address of the value will be passed in a
10526   // register, or if not available, then the address is put on the stack. In
10527   // order to pass indirectly, space on the stack often needs to be allocated
10528   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10529   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10530   // are passed CCValAssign::Indirect.
10531   for (auto &VA : ArgLocs)
10532     if (VA.getLocInfo() == CCValAssign::Indirect)
10533       return false;
10534 
10535   // Do not tail call opt if either caller or callee uses struct return
10536   // semantics.
10537   auto IsCallerStructRet = Caller.hasStructRetAttr();
10538   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10539   if (IsCallerStructRet || IsCalleeStructRet)
10540     return false;
10541 
10542   // Externally-defined functions with weak linkage should not be
10543   // tail-called. The behaviour of branch instructions in this situation (as
10544   // used for tail calls) is implementation-defined, so we cannot rely on the
10545   // linker replacing the tail call with a return.
10546   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10547     const GlobalValue *GV = G->getGlobal();
10548     if (GV->hasExternalWeakLinkage())
10549       return false;
10550   }
10551 
10552   // The callee has to preserve all registers the caller needs to preserve.
10553   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10554   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10555   if (CalleeCC != CallerCC) {
10556     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10557     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10558       return false;
10559   }
10560 
10561   // Byval parameters hand the function a pointer directly into the stack area
10562   // we want to reuse during a tail call. Working around this *is* possible
10563   // but less efficient and uglier in LowerCall.
10564   for (auto &Arg : Outs)
10565     if (Arg.Flags.isByVal())
10566       return false;
10567 
10568   return true;
10569 }
10570 
10571 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10572   return DAG.getDataLayout().getPrefTypeAlign(
10573       VT.getTypeForEVT(*DAG.getContext()));
10574 }
10575 
10576 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10577 // and output parameter nodes.
10578 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10579                                        SmallVectorImpl<SDValue> &InVals) const {
10580   SelectionDAG &DAG = CLI.DAG;
10581   SDLoc &DL = CLI.DL;
10582   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10583   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10584   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10585   SDValue Chain = CLI.Chain;
10586   SDValue Callee = CLI.Callee;
10587   bool &IsTailCall = CLI.IsTailCall;
10588   CallingConv::ID CallConv = CLI.CallConv;
10589   bool IsVarArg = CLI.IsVarArg;
10590   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10591   MVT XLenVT = Subtarget.getXLenVT();
10592 
10593   MachineFunction &MF = DAG.getMachineFunction();
10594 
10595   // Analyze the operands of the call, assigning locations to each operand.
10596   SmallVector<CCValAssign, 16> ArgLocs;
10597   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10598 
10599   if (CallConv == CallingConv::GHC)
10600     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10601   else
10602     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10603                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10604                                                     : CC_RISCV);
10605 
10606   // Check if it's really possible to do a tail call.
10607   if (IsTailCall)
10608     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10609 
10610   if (IsTailCall)
10611     ++NumTailCalls;
10612   else if (CLI.CB && CLI.CB->isMustTailCall())
10613     report_fatal_error("failed to perform tail call elimination on a call "
10614                        "site marked musttail");
10615 
10616   // Get a count of how many bytes are to be pushed on the stack.
10617   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10618 
10619   // Create local copies for byval args
10620   SmallVector<SDValue, 8> ByValArgs;
10621   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10622     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10623     if (!Flags.isByVal())
10624       continue;
10625 
10626     SDValue Arg = OutVals[i];
10627     unsigned Size = Flags.getByValSize();
10628     Align Alignment = Flags.getNonZeroByValAlign();
10629 
10630     int FI =
10631         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10632     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10633     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10634 
10635     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10636                           /*IsVolatile=*/false,
10637                           /*AlwaysInline=*/false, IsTailCall,
10638                           MachinePointerInfo(), MachinePointerInfo());
10639     ByValArgs.push_back(FIPtr);
10640   }
10641 
10642   if (!IsTailCall)
10643     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10644 
10645   // Copy argument values to their designated locations.
10646   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10647   SmallVector<SDValue, 8> MemOpChains;
10648   SDValue StackPtr;
10649   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10650     CCValAssign &VA = ArgLocs[i];
10651     SDValue ArgValue = OutVals[i];
10652     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10653 
10654     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10655     bool IsF64OnRV32DSoftABI =
10656         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10657     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10658       SDValue SplitF64 = DAG.getNode(
10659           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10660       SDValue Lo = SplitF64.getValue(0);
10661       SDValue Hi = SplitF64.getValue(1);
10662 
10663       Register RegLo = VA.getLocReg();
10664       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10665 
10666       if (RegLo == RISCV::X17) {
10667         // Second half of f64 is passed on the stack.
10668         // Work out the address of the stack slot.
10669         if (!StackPtr.getNode())
10670           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10671         // Emit the store.
10672         MemOpChains.push_back(
10673             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10674       } else {
10675         // Second half of f64 is passed in another GPR.
10676         assert(RegLo < RISCV::X31 && "Invalid register pair");
10677         Register RegHigh = RegLo + 1;
10678         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10679       }
10680       continue;
10681     }
10682 
10683     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10684     // as any other MemLoc.
10685 
10686     // Promote the value if needed.
10687     // For now, only handle fully promoted and indirect arguments.
10688     if (VA.getLocInfo() == CCValAssign::Indirect) {
10689       // Store the argument in a stack slot and pass its address.
10690       Align StackAlign =
10691           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10692                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10693       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10694       // If the original argument was split (e.g. i128), we need
10695       // to store the required parts of it here (and pass just one address).
10696       // Vectors may be partly split to registers and partly to the stack, in
10697       // which case the base address is partly offset and subsequent stores are
10698       // relative to that.
10699       unsigned ArgIndex = Outs[i].OrigArgIndex;
10700       unsigned ArgPartOffset = Outs[i].PartOffset;
10701       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10702       // Calculate the total size to store. We don't have access to what we're
10703       // actually storing other than performing the loop and collecting the
10704       // info.
10705       SmallVector<std::pair<SDValue, SDValue>> Parts;
10706       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10707         SDValue PartValue = OutVals[i + 1];
10708         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10709         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10710         EVT PartVT = PartValue.getValueType();
10711         if (PartVT.isScalableVector())
10712           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10713         StoredSize += PartVT.getStoreSize();
10714         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10715         Parts.push_back(std::make_pair(PartValue, Offset));
10716         ++i;
10717       }
10718       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10719       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10720       MemOpChains.push_back(
10721           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10722                        MachinePointerInfo::getFixedStack(MF, FI)));
10723       for (const auto &Part : Parts) {
10724         SDValue PartValue = Part.first;
10725         SDValue PartOffset = Part.second;
10726         SDValue Address =
10727             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10728         MemOpChains.push_back(
10729             DAG.getStore(Chain, DL, PartValue, Address,
10730                          MachinePointerInfo::getFixedStack(MF, FI)));
10731       }
10732       ArgValue = SpillSlot;
10733     } else {
10734       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10735     }
10736 
10737     // Use local copy if it is a byval arg.
10738     if (Flags.isByVal())
10739       ArgValue = ByValArgs[j++];
10740 
10741     if (VA.isRegLoc()) {
10742       // Queue up the argument copies and emit them at the end.
10743       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10744     } else {
10745       assert(VA.isMemLoc() && "Argument not register or memory");
10746       assert(!IsTailCall && "Tail call not allowed if stack is used "
10747                             "for passing parameters");
10748 
10749       // Work out the address of the stack slot.
10750       if (!StackPtr.getNode())
10751         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10752       SDValue Address =
10753           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10754                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10755 
10756       // Emit the store.
10757       MemOpChains.push_back(
10758           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10759     }
10760   }
10761 
10762   // Join the stores, which are independent of one another.
10763   if (!MemOpChains.empty())
10764     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10765 
10766   SDValue Glue;
10767 
10768   // Build a sequence of copy-to-reg nodes, chained and glued together.
10769   for (auto &Reg : RegsToPass) {
10770     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10771     Glue = Chain.getValue(1);
10772   }
10773 
10774   // Validate that none of the argument registers have been marked as
10775   // reserved, if so report an error. Do the same for the return address if this
10776   // is not a tailcall.
10777   validateCCReservedRegs(RegsToPass, MF);
10778   if (!IsTailCall &&
10779       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10780     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10781         MF.getFunction(),
10782         "Return address register required, but has been reserved."});
10783 
10784   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10785   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10786   // split it and then direct call can be matched by PseudoCALL.
10787   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10788     const GlobalValue *GV = S->getGlobal();
10789 
10790     unsigned OpFlags = RISCVII::MO_CALL;
10791     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10792       OpFlags = RISCVII::MO_PLT;
10793 
10794     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10795   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10796     unsigned OpFlags = RISCVII::MO_CALL;
10797 
10798     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10799                                                  nullptr))
10800       OpFlags = RISCVII::MO_PLT;
10801 
10802     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10803   }
10804 
10805   // The first call operand is the chain and the second is the target address.
10806   SmallVector<SDValue, 8> Ops;
10807   Ops.push_back(Chain);
10808   Ops.push_back(Callee);
10809 
10810   // Add argument registers to the end of the list so that they are
10811   // known live into the call.
10812   for (auto &Reg : RegsToPass)
10813     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10814 
10815   if (!IsTailCall) {
10816     // Add a register mask operand representing the call-preserved registers.
10817     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10818     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10819     assert(Mask && "Missing call preserved mask for calling convention");
10820     Ops.push_back(DAG.getRegisterMask(Mask));
10821   }
10822 
10823   // Glue the call to the argument copies, if any.
10824   if (Glue.getNode())
10825     Ops.push_back(Glue);
10826 
10827   // Emit the call.
10828   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10829 
10830   if (IsTailCall) {
10831     MF.getFrameInfo().setHasTailCall();
10832     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10833   }
10834 
10835   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10836   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10837   Glue = Chain.getValue(1);
10838 
10839   // Mark the end of the call, which is glued to the call itself.
10840   Chain = DAG.getCALLSEQ_END(Chain,
10841                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10842                              DAG.getConstant(0, DL, PtrVT, true),
10843                              Glue, DL);
10844   Glue = Chain.getValue(1);
10845 
10846   // Assign locations to each value returned by this call.
10847   SmallVector<CCValAssign, 16> RVLocs;
10848   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10849   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10850 
10851   // Copy all of the result registers out of their specified physreg.
10852   for (auto &VA : RVLocs) {
10853     // Copy the value out
10854     SDValue RetValue =
10855         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10856     // Glue the RetValue to the end of the call sequence
10857     Chain = RetValue.getValue(1);
10858     Glue = RetValue.getValue(2);
10859 
10860     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10861       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10862       SDValue RetValue2 =
10863           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10864       Chain = RetValue2.getValue(1);
10865       Glue = RetValue2.getValue(2);
10866       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10867                              RetValue2);
10868     }
10869 
10870     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10871 
10872     InVals.push_back(RetValue);
10873   }
10874 
10875   return Chain;
10876 }
10877 
10878 bool RISCVTargetLowering::CanLowerReturn(
10879     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10880     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10881   SmallVector<CCValAssign, 16> RVLocs;
10882   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10883 
10884   Optional<unsigned> FirstMaskArgument;
10885   if (Subtarget.hasVInstructions())
10886     FirstMaskArgument = preAssignMask(Outs);
10887 
10888   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10889     MVT VT = Outs[i].VT;
10890     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10891     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10892     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10893                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10894                  *this, FirstMaskArgument))
10895       return false;
10896   }
10897   return true;
10898 }
10899 
10900 SDValue
10901 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10902                                  bool IsVarArg,
10903                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10904                                  const SmallVectorImpl<SDValue> &OutVals,
10905                                  const SDLoc &DL, SelectionDAG &DAG) const {
10906   const MachineFunction &MF = DAG.getMachineFunction();
10907   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10908 
10909   // Stores the assignment of the return value to a location.
10910   SmallVector<CCValAssign, 16> RVLocs;
10911 
10912   // Info about the registers and stack slot.
10913   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10914                  *DAG.getContext());
10915 
10916   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10917                     nullptr, CC_RISCV);
10918 
10919   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10920     report_fatal_error("GHC functions return void only");
10921 
10922   SDValue Glue;
10923   SmallVector<SDValue, 4> RetOps(1, Chain);
10924 
10925   // Copy the result values into the output registers.
10926   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10927     SDValue Val = OutVals[i];
10928     CCValAssign &VA = RVLocs[i];
10929     assert(VA.isRegLoc() && "Can only return in registers!");
10930 
10931     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10932       // Handle returning f64 on RV32D with a soft float ABI.
10933       assert(VA.isRegLoc() && "Expected return via registers");
10934       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10935                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10936       SDValue Lo = SplitF64.getValue(0);
10937       SDValue Hi = SplitF64.getValue(1);
10938       Register RegLo = VA.getLocReg();
10939       assert(RegLo < RISCV::X31 && "Invalid register pair");
10940       Register RegHi = RegLo + 1;
10941 
10942       if (STI.isRegisterReservedByUser(RegLo) ||
10943           STI.isRegisterReservedByUser(RegHi))
10944         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10945             MF.getFunction(),
10946             "Return value register required, but has been reserved."});
10947 
10948       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10949       Glue = Chain.getValue(1);
10950       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10951       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10952       Glue = Chain.getValue(1);
10953       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10954     } else {
10955       // Handle a 'normal' return.
10956       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10957       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10958 
10959       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10960         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10961             MF.getFunction(),
10962             "Return value register required, but has been reserved."});
10963 
10964       // Guarantee that all emitted copies are stuck together.
10965       Glue = Chain.getValue(1);
10966       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10967     }
10968   }
10969 
10970   RetOps[0] = Chain; // Update chain.
10971 
10972   // Add the glue node if we have it.
10973   if (Glue.getNode()) {
10974     RetOps.push_back(Glue);
10975   }
10976 
10977   unsigned RetOpc = RISCVISD::RET_FLAG;
10978   // Interrupt service routines use different return instructions.
10979   const Function &Func = DAG.getMachineFunction().getFunction();
10980   if (Func.hasFnAttribute("interrupt")) {
10981     if (!Func.getReturnType()->isVoidTy())
10982       report_fatal_error(
10983           "Functions with the interrupt attribute must have void return type!");
10984 
10985     MachineFunction &MF = DAG.getMachineFunction();
10986     StringRef Kind =
10987       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10988 
10989     if (Kind == "user")
10990       RetOpc = RISCVISD::URET_FLAG;
10991     else if (Kind == "supervisor")
10992       RetOpc = RISCVISD::SRET_FLAG;
10993     else
10994       RetOpc = RISCVISD::MRET_FLAG;
10995   }
10996 
10997   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10998 }
10999 
11000 void RISCVTargetLowering::validateCCReservedRegs(
11001     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11002     MachineFunction &MF) const {
11003   const Function &F = MF.getFunction();
11004   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11005 
11006   if (llvm::any_of(Regs, [&STI](auto Reg) {
11007         return STI.isRegisterReservedByUser(Reg.first);
11008       }))
11009     F.getContext().diagnose(DiagnosticInfoUnsupported{
11010         F, "Argument register required, but has been reserved."});
11011 }
11012 
11013 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11014   return CI->isTailCall();
11015 }
11016 
11017 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11018 #define NODE_NAME_CASE(NODE)                                                   \
11019   case RISCVISD::NODE:                                                         \
11020     return "RISCVISD::" #NODE;
11021   // clang-format off
11022   switch ((RISCVISD::NodeType)Opcode) {
11023   case RISCVISD::FIRST_NUMBER:
11024     break;
11025   NODE_NAME_CASE(RET_FLAG)
11026   NODE_NAME_CASE(URET_FLAG)
11027   NODE_NAME_CASE(SRET_FLAG)
11028   NODE_NAME_CASE(MRET_FLAG)
11029   NODE_NAME_CASE(CALL)
11030   NODE_NAME_CASE(SELECT_CC)
11031   NODE_NAME_CASE(BR_CC)
11032   NODE_NAME_CASE(BuildPairF64)
11033   NODE_NAME_CASE(SplitF64)
11034   NODE_NAME_CASE(TAIL)
11035   NODE_NAME_CASE(MULHSU)
11036   NODE_NAME_CASE(SLLW)
11037   NODE_NAME_CASE(SRAW)
11038   NODE_NAME_CASE(SRLW)
11039   NODE_NAME_CASE(DIVW)
11040   NODE_NAME_CASE(DIVUW)
11041   NODE_NAME_CASE(REMUW)
11042   NODE_NAME_CASE(ROLW)
11043   NODE_NAME_CASE(RORW)
11044   NODE_NAME_CASE(CLZW)
11045   NODE_NAME_CASE(CTZW)
11046   NODE_NAME_CASE(FSLW)
11047   NODE_NAME_CASE(FSRW)
11048   NODE_NAME_CASE(FSL)
11049   NODE_NAME_CASE(FSR)
11050   NODE_NAME_CASE(FMV_H_X)
11051   NODE_NAME_CASE(FMV_X_ANYEXTH)
11052   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11053   NODE_NAME_CASE(FMV_W_X_RV64)
11054   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11055   NODE_NAME_CASE(FCVT_X)
11056   NODE_NAME_CASE(FCVT_XU)
11057   NODE_NAME_CASE(FCVT_W_RV64)
11058   NODE_NAME_CASE(FCVT_WU_RV64)
11059   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11060   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11061   NODE_NAME_CASE(READ_CYCLE_WIDE)
11062   NODE_NAME_CASE(GREV)
11063   NODE_NAME_CASE(GREVW)
11064   NODE_NAME_CASE(GORC)
11065   NODE_NAME_CASE(GORCW)
11066   NODE_NAME_CASE(SHFL)
11067   NODE_NAME_CASE(SHFLW)
11068   NODE_NAME_CASE(UNSHFL)
11069   NODE_NAME_CASE(UNSHFLW)
11070   NODE_NAME_CASE(BFP)
11071   NODE_NAME_CASE(BFPW)
11072   NODE_NAME_CASE(BCOMPRESS)
11073   NODE_NAME_CASE(BCOMPRESSW)
11074   NODE_NAME_CASE(BDECOMPRESS)
11075   NODE_NAME_CASE(BDECOMPRESSW)
11076   NODE_NAME_CASE(VMV_V_X_VL)
11077   NODE_NAME_CASE(VFMV_V_F_VL)
11078   NODE_NAME_CASE(VMV_X_S)
11079   NODE_NAME_CASE(VMV_S_X_VL)
11080   NODE_NAME_CASE(VFMV_S_F_VL)
11081   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11082   NODE_NAME_CASE(READ_VLENB)
11083   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11084   NODE_NAME_CASE(VSLIDEUP_VL)
11085   NODE_NAME_CASE(VSLIDE1UP_VL)
11086   NODE_NAME_CASE(VSLIDEDOWN_VL)
11087   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11088   NODE_NAME_CASE(VID_VL)
11089   NODE_NAME_CASE(VFNCVT_ROD_VL)
11090   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11091   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11092   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11093   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11094   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11095   NODE_NAME_CASE(VECREDUCE_AND_VL)
11096   NODE_NAME_CASE(VECREDUCE_OR_VL)
11097   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11098   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11099   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11100   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11101   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11102   NODE_NAME_CASE(ADD_VL)
11103   NODE_NAME_CASE(AND_VL)
11104   NODE_NAME_CASE(MUL_VL)
11105   NODE_NAME_CASE(OR_VL)
11106   NODE_NAME_CASE(SDIV_VL)
11107   NODE_NAME_CASE(SHL_VL)
11108   NODE_NAME_CASE(SREM_VL)
11109   NODE_NAME_CASE(SRA_VL)
11110   NODE_NAME_CASE(SRL_VL)
11111   NODE_NAME_CASE(SUB_VL)
11112   NODE_NAME_CASE(UDIV_VL)
11113   NODE_NAME_CASE(UREM_VL)
11114   NODE_NAME_CASE(XOR_VL)
11115   NODE_NAME_CASE(SADDSAT_VL)
11116   NODE_NAME_CASE(UADDSAT_VL)
11117   NODE_NAME_CASE(SSUBSAT_VL)
11118   NODE_NAME_CASE(USUBSAT_VL)
11119   NODE_NAME_CASE(FADD_VL)
11120   NODE_NAME_CASE(FSUB_VL)
11121   NODE_NAME_CASE(FMUL_VL)
11122   NODE_NAME_CASE(FDIV_VL)
11123   NODE_NAME_CASE(FNEG_VL)
11124   NODE_NAME_CASE(FABS_VL)
11125   NODE_NAME_CASE(FSQRT_VL)
11126   NODE_NAME_CASE(FMA_VL)
11127   NODE_NAME_CASE(FCOPYSIGN_VL)
11128   NODE_NAME_CASE(SMIN_VL)
11129   NODE_NAME_CASE(SMAX_VL)
11130   NODE_NAME_CASE(UMIN_VL)
11131   NODE_NAME_CASE(UMAX_VL)
11132   NODE_NAME_CASE(FMINNUM_VL)
11133   NODE_NAME_CASE(FMAXNUM_VL)
11134   NODE_NAME_CASE(MULHS_VL)
11135   NODE_NAME_CASE(MULHU_VL)
11136   NODE_NAME_CASE(FP_TO_SINT_VL)
11137   NODE_NAME_CASE(FP_TO_UINT_VL)
11138   NODE_NAME_CASE(SINT_TO_FP_VL)
11139   NODE_NAME_CASE(UINT_TO_FP_VL)
11140   NODE_NAME_CASE(FP_EXTEND_VL)
11141   NODE_NAME_CASE(FP_ROUND_VL)
11142   NODE_NAME_CASE(VWMUL_VL)
11143   NODE_NAME_CASE(VWMULU_VL)
11144   NODE_NAME_CASE(VWMULSU_VL)
11145   NODE_NAME_CASE(VWADD_VL)
11146   NODE_NAME_CASE(VWADDU_VL)
11147   NODE_NAME_CASE(VWSUB_VL)
11148   NODE_NAME_CASE(VWSUBU_VL)
11149   NODE_NAME_CASE(VWADD_W_VL)
11150   NODE_NAME_CASE(VWADDU_W_VL)
11151   NODE_NAME_CASE(VWSUB_W_VL)
11152   NODE_NAME_CASE(VWSUBU_W_VL)
11153   NODE_NAME_CASE(SETCC_VL)
11154   NODE_NAME_CASE(VSELECT_VL)
11155   NODE_NAME_CASE(VP_MERGE_VL)
11156   NODE_NAME_CASE(VMAND_VL)
11157   NODE_NAME_CASE(VMOR_VL)
11158   NODE_NAME_CASE(VMXOR_VL)
11159   NODE_NAME_CASE(VMCLR_VL)
11160   NODE_NAME_CASE(VMSET_VL)
11161   NODE_NAME_CASE(VRGATHER_VX_VL)
11162   NODE_NAME_CASE(VRGATHER_VV_VL)
11163   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11164   NODE_NAME_CASE(VSEXT_VL)
11165   NODE_NAME_CASE(VZEXT_VL)
11166   NODE_NAME_CASE(VCPOP_VL)
11167   NODE_NAME_CASE(READ_CSR)
11168   NODE_NAME_CASE(WRITE_CSR)
11169   NODE_NAME_CASE(SWAP_CSR)
11170   }
11171   // clang-format on
11172   return nullptr;
11173 #undef NODE_NAME_CASE
11174 }
11175 
11176 /// getConstraintType - Given a constraint letter, return the type of
11177 /// constraint it is for this target.
11178 RISCVTargetLowering::ConstraintType
11179 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11180   if (Constraint.size() == 1) {
11181     switch (Constraint[0]) {
11182     default:
11183       break;
11184     case 'f':
11185       return C_RegisterClass;
11186     case 'I':
11187     case 'J':
11188     case 'K':
11189       return C_Immediate;
11190     case 'A':
11191       return C_Memory;
11192     case 'S': // A symbolic address
11193       return C_Other;
11194     }
11195   } else {
11196     if (Constraint == "vr" || Constraint == "vm")
11197       return C_RegisterClass;
11198   }
11199   return TargetLowering::getConstraintType(Constraint);
11200 }
11201 
11202 std::pair<unsigned, const TargetRegisterClass *>
11203 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11204                                                   StringRef Constraint,
11205                                                   MVT VT) const {
11206   // First, see if this is a constraint that directly corresponds to a
11207   // RISCV register class.
11208   if (Constraint.size() == 1) {
11209     switch (Constraint[0]) {
11210     case 'r':
11211       // TODO: Support fixed vectors up to XLen for P extension?
11212       if (VT.isVector())
11213         break;
11214       return std::make_pair(0U, &RISCV::GPRRegClass);
11215     case 'f':
11216       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11217         return std::make_pair(0U, &RISCV::FPR16RegClass);
11218       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11219         return std::make_pair(0U, &RISCV::FPR32RegClass);
11220       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11221         return std::make_pair(0U, &RISCV::FPR64RegClass);
11222       break;
11223     default:
11224       break;
11225     }
11226   } else if (Constraint == "vr") {
11227     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11228                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11229       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11230         return std::make_pair(0U, RC);
11231     }
11232   } else if (Constraint == "vm") {
11233     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11234       return std::make_pair(0U, &RISCV::VMV0RegClass);
11235   }
11236 
11237   // Clang will correctly decode the usage of register name aliases into their
11238   // official names. However, other frontends like `rustc` do not. This allows
11239   // users of these frontends to use the ABI names for registers in LLVM-style
11240   // register constraints.
11241   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11242                                .Case("{zero}", RISCV::X0)
11243                                .Case("{ra}", RISCV::X1)
11244                                .Case("{sp}", RISCV::X2)
11245                                .Case("{gp}", RISCV::X3)
11246                                .Case("{tp}", RISCV::X4)
11247                                .Case("{t0}", RISCV::X5)
11248                                .Case("{t1}", RISCV::X6)
11249                                .Case("{t2}", RISCV::X7)
11250                                .Cases("{s0}", "{fp}", RISCV::X8)
11251                                .Case("{s1}", RISCV::X9)
11252                                .Case("{a0}", RISCV::X10)
11253                                .Case("{a1}", RISCV::X11)
11254                                .Case("{a2}", RISCV::X12)
11255                                .Case("{a3}", RISCV::X13)
11256                                .Case("{a4}", RISCV::X14)
11257                                .Case("{a5}", RISCV::X15)
11258                                .Case("{a6}", RISCV::X16)
11259                                .Case("{a7}", RISCV::X17)
11260                                .Case("{s2}", RISCV::X18)
11261                                .Case("{s3}", RISCV::X19)
11262                                .Case("{s4}", RISCV::X20)
11263                                .Case("{s5}", RISCV::X21)
11264                                .Case("{s6}", RISCV::X22)
11265                                .Case("{s7}", RISCV::X23)
11266                                .Case("{s8}", RISCV::X24)
11267                                .Case("{s9}", RISCV::X25)
11268                                .Case("{s10}", RISCV::X26)
11269                                .Case("{s11}", RISCV::X27)
11270                                .Case("{t3}", RISCV::X28)
11271                                .Case("{t4}", RISCV::X29)
11272                                .Case("{t5}", RISCV::X30)
11273                                .Case("{t6}", RISCV::X31)
11274                                .Default(RISCV::NoRegister);
11275   if (XRegFromAlias != RISCV::NoRegister)
11276     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11277 
11278   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11279   // TableGen record rather than the AsmName to choose registers for InlineAsm
11280   // constraints, plus we want to match those names to the widest floating point
11281   // register type available, manually select floating point registers here.
11282   //
11283   // The second case is the ABI name of the register, so that frontends can also
11284   // use the ABI names in register constraint lists.
11285   if (Subtarget.hasStdExtF()) {
11286     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11287                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11288                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11289                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11290                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11291                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11292                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11293                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11294                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11295                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11296                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11297                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11298                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11299                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11300                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11301                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11302                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11303                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11304                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11305                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11306                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11307                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11308                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11309                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11310                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11311                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11312                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11313                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11314                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11315                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11316                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11317                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11318                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11319                         .Default(RISCV::NoRegister);
11320     if (FReg != RISCV::NoRegister) {
11321       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11322       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11323         unsigned RegNo = FReg - RISCV::F0_F;
11324         unsigned DReg = RISCV::F0_D + RegNo;
11325         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11326       }
11327       if (VT == MVT::f32 || VT == MVT::Other)
11328         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11329       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11330         unsigned RegNo = FReg - RISCV::F0_F;
11331         unsigned HReg = RISCV::F0_H + RegNo;
11332         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11333       }
11334     }
11335   }
11336 
11337   if (Subtarget.hasVInstructions()) {
11338     Register VReg = StringSwitch<Register>(Constraint.lower())
11339                         .Case("{v0}", RISCV::V0)
11340                         .Case("{v1}", RISCV::V1)
11341                         .Case("{v2}", RISCV::V2)
11342                         .Case("{v3}", RISCV::V3)
11343                         .Case("{v4}", RISCV::V4)
11344                         .Case("{v5}", RISCV::V5)
11345                         .Case("{v6}", RISCV::V6)
11346                         .Case("{v7}", RISCV::V7)
11347                         .Case("{v8}", RISCV::V8)
11348                         .Case("{v9}", RISCV::V9)
11349                         .Case("{v10}", RISCV::V10)
11350                         .Case("{v11}", RISCV::V11)
11351                         .Case("{v12}", RISCV::V12)
11352                         .Case("{v13}", RISCV::V13)
11353                         .Case("{v14}", RISCV::V14)
11354                         .Case("{v15}", RISCV::V15)
11355                         .Case("{v16}", RISCV::V16)
11356                         .Case("{v17}", RISCV::V17)
11357                         .Case("{v18}", RISCV::V18)
11358                         .Case("{v19}", RISCV::V19)
11359                         .Case("{v20}", RISCV::V20)
11360                         .Case("{v21}", RISCV::V21)
11361                         .Case("{v22}", RISCV::V22)
11362                         .Case("{v23}", RISCV::V23)
11363                         .Case("{v24}", RISCV::V24)
11364                         .Case("{v25}", RISCV::V25)
11365                         .Case("{v26}", RISCV::V26)
11366                         .Case("{v27}", RISCV::V27)
11367                         .Case("{v28}", RISCV::V28)
11368                         .Case("{v29}", RISCV::V29)
11369                         .Case("{v30}", RISCV::V30)
11370                         .Case("{v31}", RISCV::V31)
11371                         .Default(RISCV::NoRegister);
11372     if (VReg != RISCV::NoRegister) {
11373       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11374         return std::make_pair(VReg, &RISCV::VMRegClass);
11375       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11376         return std::make_pair(VReg, &RISCV::VRRegClass);
11377       for (const auto *RC :
11378            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11379         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11380           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11381           return std::make_pair(VReg, RC);
11382         }
11383       }
11384     }
11385   }
11386 
11387   std::pair<Register, const TargetRegisterClass *> Res =
11388       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11389 
11390   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11391   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11392   // Subtarget into account.
11393   if (Res.second == &RISCV::GPRF16RegClass ||
11394       Res.second == &RISCV::GPRF32RegClass ||
11395       Res.second == &RISCV::GPRF64RegClass)
11396     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11397 
11398   return Res;
11399 }
11400 
11401 unsigned
11402 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11403   // Currently only support length 1 constraints.
11404   if (ConstraintCode.size() == 1) {
11405     switch (ConstraintCode[0]) {
11406     case 'A':
11407       return InlineAsm::Constraint_A;
11408     default:
11409       break;
11410     }
11411   }
11412 
11413   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11414 }
11415 
11416 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11417     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11418     SelectionDAG &DAG) const {
11419   // Currently only support length 1 constraints.
11420   if (Constraint.length() == 1) {
11421     switch (Constraint[0]) {
11422     case 'I':
11423       // Validate & create a 12-bit signed immediate operand.
11424       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11425         uint64_t CVal = C->getSExtValue();
11426         if (isInt<12>(CVal))
11427           Ops.push_back(
11428               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11429       }
11430       return;
11431     case 'J':
11432       // Validate & create an integer zero operand.
11433       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11434         if (C->getZExtValue() == 0)
11435           Ops.push_back(
11436               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11437       return;
11438     case 'K':
11439       // Validate & create a 5-bit unsigned immediate operand.
11440       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11441         uint64_t CVal = C->getZExtValue();
11442         if (isUInt<5>(CVal))
11443           Ops.push_back(
11444               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11445       }
11446       return;
11447     case 'S':
11448       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11449         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11450                                                  GA->getValueType(0)));
11451       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11452         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11453                                                 BA->getValueType(0)));
11454       }
11455       return;
11456     default:
11457       break;
11458     }
11459   }
11460   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11461 }
11462 
11463 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11464                                                    Instruction *Inst,
11465                                                    AtomicOrdering Ord) const {
11466   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11467     return Builder.CreateFence(Ord);
11468   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11469     return Builder.CreateFence(AtomicOrdering::Release);
11470   return nullptr;
11471 }
11472 
11473 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11474                                                     Instruction *Inst,
11475                                                     AtomicOrdering Ord) const {
11476   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11477     return Builder.CreateFence(AtomicOrdering::Acquire);
11478   return nullptr;
11479 }
11480 
11481 TargetLowering::AtomicExpansionKind
11482 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11483   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11484   // point operations can't be used in an lr/sc sequence without breaking the
11485   // forward-progress guarantee.
11486   if (AI->isFloatingPointOperation())
11487     return AtomicExpansionKind::CmpXChg;
11488 
11489   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11490   if (Size == 8 || Size == 16)
11491     return AtomicExpansionKind::MaskedIntrinsic;
11492   return AtomicExpansionKind::None;
11493 }
11494 
11495 static Intrinsic::ID
11496 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11497   if (XLen == 32) {
11498     switch (BinOp) {
11499     default:
11500       llvm_unreachable("Unexpected AtomicRMW BinOp");
11501     case AtomicRMWInst::Xchg:
11502       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11503     case AtomicRMWInst::Add:
11504       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11505     case AtomicRMWInst::Sub:
11506       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11507     case AtomicRMWInst::Nand:
11508       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11509     case AtomicRMWInst::Max:
11510       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11511     case AtomicRMWInst::Min:
11512       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11513     case AtomicRMWInst::UMax:
11514       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11515     case AtomicRMWInst::UMin:
11516       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11517     }
11518   }
11519 
11520   if (XLen == 64) {
11521     switch (BinOp) {
11522     default:
11523       llvm_unreachable("Unexpected AtomicRMW BinOp");
11524     case AtomicRMWInst::Xchg:
11525       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11526     case AtomicRMWInst::Add:
11527       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11528     case AtomicRMWInst::Sub:
11529       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11530     case AtomicRMWInst::Nand:
11531       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11532     case AtomicRMWInst::Max:
11533       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11534     case AtomicRMWInst::Min:
11535       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11536     case AtomicRMWInst::UMax:
11537       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11538     case AtomicRMWInst::UMin:
11539       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11540     }
11541   }
11542 
11543   llvm_unreachable("Unexpected XLen\n");
11544 }
11545 
11546 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11547     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11548     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11549   unsigned XLen = Subtarget.getXLen();
11550   Value *Ordering =
11551       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11552   Type *Tys[] = {AlignedAddr->getType()};
11553   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11554       AI->getModule(),
11555       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11556 
11557   if (XLen == 64) {
11558     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11559     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11560     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11561   }
11562 
11563   Value *Result;
11564 
11565   // Must pass the shift amount needed to sign extend the loaded value prior
11566   // to performing a signed comparison for min/max. ShiftAmt is the number of
11567   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11568   // is the number of bits to left+right shift the value in order to
11569   // sign-extend.
11570   if (AI->getOperation() == AtomicRMWInst::Min ||
11571       AI->getOperation() == AtomicRMWInst::Max) {
11572     const DataLayout &DL = AI->getModule()->getDataLayout();
11573     unsigned ValWidth =
11574         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11575     Value *SextShamt =
11576         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11577     Result = Builder.CreateCall(LrwOpScwLoop,
11578                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11579   } else {
11580     Result =
11581         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11582   }
11583 
11584   if (XLen == 64)
11585     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11586   return Result;
11587 }
11588 
11589 TargetLowering::AtomicExpansionKind
11590 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11591     AtomicCmpXchgInst *CI) const {
11592   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11593   if (Size == 8 || Size == 16)
11594     return AtomicExpansionKind::MaskedIntrinsic;
11595   return AtomicExpansionKind::None;
11596 }
11597 
11598 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11599     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11600     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11601   unsigned XLen = Subtarget.getXLen();
11602   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11603   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11604   if (XLen == 64) {
11605     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11606     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11607     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11608     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11609   }
11610   Type *Tys[] = {AlignedAddr->getType()};
11611   Function *MaskedCmpXchg =
11612       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11613   Value *Result = Builder.CreateCall(
11614       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11615   if (XLen == 64)
11616     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11617   return Result;
11618 }
11619 
11620 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11621   return false;
11622 }
11623 
11624 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11625                                                EVT VT) const {
11626   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11627     return false;
11628 
11629   switch (FPVT.getSimpleVT().SimpleTy) {
11630   case MVT::f16:
11631     return Subtarget.hasStdExtZfh();
11632   case MVT::f32:
11633     return Subtarget.hasStdExtF();
11634   case MVT::f64:
11635     return Subtarget.hasStdExtD();
11636   default:
11637     return false;
11638   }
11639 }
11640 
11641 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11642   // If we are using the small code model, we can reduce size of jump table
11643   // entry to 4 bytes.
11644   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11645       getTargetMachine().getCodeModel() == CodeModel::Small) {
11646     return MachineJumpTableInfo::EK_Custom32;
11647   }
11648   return TargetLowering::getJumpTableEncoding();
11649 }
11650 
11651 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11652     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11653     unsigned uid, MCContext &Ctx) const {
11654   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11655          getTargetMachine().getCodeModel() == CodeModel::Small);
11656   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11657 }
11658 
11659 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11660                                                      EVT VT) const {
11661   VT = VT.getScalarType();
11662 
11663   if (!VT.isSimple())
11664     return false;
11665 
11666   switch (VT.getSimpleVT().SimpleTy) {
11667   case MVT::f16:
11668     return Subtarget.hasStdExtZfh();
11669   case MVT::f32:
11670     return Subtarget.hasStdExtF();
11671   case MVT::f64:
11672     return Subtarget.hasStdExtD();
11673   default:
11674     break;
11675   }
11676 
11677   return false;
11678 }
11679 
11680 Register RISCVTargetLowering::getExceptionPointerRegister(
11681     const Constant *PersonalityFn) const {
11682   return RISCV::X10;
11683 }
11684 
11685 Register RISCVTargetLowering::getExceptionSelectorRegister(
11686     const Constant *PersonalityFn) const {
11687   return RISCV::X11;
11688 }
11689 
11690 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11691   // Return false to suppress the unnecessary extensions if the LibCall
11692   // arguments or return value is f32 type for LP64 ABI.
11693   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11694   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11695     return false;
11696 
11697   return true;
11698 }
11699 
11700 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11701   if (Subtarget.is64Bit() && Type == MVT::i32)
11702     return true;
11703 
11704   return IsSigned;
11705 }
11706 
11707 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11708                                                  SDValue C) const {
11709   // Check integral scalar types.
11710   if (VT.isScalarInteger()) {
11711     // Omit the optimization if the sub target has the M extension and the data
11712     // size exceeds XLen.
11713     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11714       return false;
11715     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11716       // Break the MUL to a SLLI and an ADD/SUB.
11717       const APInt &Imm = ConstNode->getAPIntValue();
11718       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11719           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11720         return true;
11721       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11722       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11723           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11724            (Imm - 8).isPowerOf2()))
11725         return true;
11726       // Omit the following optimization if the sub target has the M extension
11727       // and the data size >= XLen.
11728       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11729         return false;
11730       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11731       // a pair of LUI/ADDI.
11732       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11733         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11734         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11735             (1 - ImmS).isPowerOf2())
11736         return true;
11737       }
11738     }
11739   }
11740 
11741   return false;
11742 }
11743 
11744 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11745                                                       SDValue ConstNode) const {
11746   // Let the DAGCombiner decide for vectors.
11747   EVT VT = AddNode.getValueType();
11748   if (VT.isVector())
11749     return true;
11750 
11751   // Let the DAGCombiner decide for larger types.
11752   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11753     return true;
11754 
11755   // It is worse if c1 is simm12 while c1*c2 is not.
11756   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11757   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11758   const APInt &C1 = C1Node->getAPIntValue();
11759   const APInt &C2 = C2Node->getAPIntValue();
11760   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11761     return false;
11762 
11763   // Default to true and let the DAGCombiner decide.
11764   return true;
11765 }
11766 
11767 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11768     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11769     bool *Fast) const {
11770   if (!VT.isVector())
11771     return false;
11772 
11773   EVT ElemVT = VT.getVectorElementType();
11774   if (Alignment >= ElemVT.getStoreSize()) {
11775     if (Fast)
11776       *Fast = true;
11777     return true;
11778   }
11779 
11780   return false;
11781 }
11782 
11783 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11784     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11785     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11786   bool IsABIRegCopy = CC.hasValue();
11787   EVT ValueVT = Val.getValueType();
11788   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11789     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11790     // and cast to f32.
11791     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11792     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11793     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11794                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11795     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11796     Parts[0] = Val;
11797     return true;
11798   }
11799 
11800   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11801     LLVMContext &Context = *DAG.getContext();
11802     EVT ValueEltVT = ValueVT.getVectorElementType();
11803     EVT PartEltVT = PartVT.getVectorElementType();
11804     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11805     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11806     if (PartVTBitSize % ValueVTBitSize == 0) {
11807       assert(PartVTBitSize >= ValueVTBitSize);
11808       // If the element types are different, bitcast to the same element type of
11809       // PartVT first.
11810       // Give an example here, we want copy a <vscale x 1 x i8> value to
11811       // <vscale x 4 x i16>.
11812       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11813       // subvector, then we can bitcast to <vscale x 4 x i16>.
11814       if (ValueEltVT != PartEltVT) {
11815         if (PartVTBitSize > ValueVTBitSize) {
11816           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11817           assert(Count != 0 && "The number of element should not be zero.");
11818           EVT SameEltTypeVT =
11819               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11820           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11821                             DAG.getUNDEF(SameEltTypeVT), Val,
11822                             DAG.getVectorIdxConstant(0, DL));
11823         }
11824         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11825       } else {
11826         Val =
11827             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11828                         Val, DAG.getVectorIdxConstant(0, DL));
11829       }
11830       Parts[0] = Val;
11831       return true;
11832     }
11833   }
11834   return false;
11835 }
11836 
11837 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11838     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11839     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11840   bool IsABIRegCopy = CC.hasValue();
11841   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11842     SDValue Val = Parts[0];
11843 
11844     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11845     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11846     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11847     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11848     return Val;
11849   }
11850 
11851   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11852     LLVMContext &Context = *DAG.getContext();
11853     SDValue Val = Parts[0];
11854     EVT ValueEltVT = ValueVT.getVectorElementType();
11855     EVT PartEltVT = PartVT.getVectorElementType();
11856     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11857     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11858     if (PartVTBitSize % ValueVTBitSize == 0) {
11859       assert(PartVTBitSize >= ValueVTBitSize);
11860       EVT SameEltTypeVT = ValueVT;
11861       // If the element types are different, convert it to the same element type
11862       // of PartVT.
11863       // Give an example here, we want copy a <vscale x 1 x i8> value from
11864       // <vscale x 4 x i16>.
11865       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11866       // then we can extract <vscale x 1 x i8>.
11867       if (ValueEltVT != PartEltVT) {
11868         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11869         assert(Count != 0 && "The number of element should not be zero.");
11870         SameEltTypeVT =
11871             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11872         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11873       }
11874       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11875                         DAG.getVectorIdxConstant(0, DL));
11876       return Val;
11877     }
11878   }
11879   return SDValue();
11880 }
11881 
11882 SDValue
11883 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11884                                    SelectionDAG &DAG,
11885                                    SmallVectorImpl<SDNode *> &Created) const {
11886   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11887   if (isIntDivCheap(N->getValueType(0), Attr))
11888     return SDValue(N, 0); // Lower SDIV as SDIV
11889 
11890   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11891          "Unexpected divisor!");
11892 
11893   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11894   if (!Subtarget.hasStdExtZbt())
11895     return SDValue();
11896 
11897   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11898   // Besides, more critical path instructions will be generated when dividing
11899   // by 2. So we keep using the original DAGs for these cases.
11900   unsigned Lg2 = Divisor.countTrailingZeros();
11901   if (Lg2 == 1 || Lg2 >= 12)
11902     return SDValue();
11903 
11904   // fold (sdiv X, pow2)
11905   EVT VT = N->getValueType(0);
11906   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11907     return SDValue();
11908 
11909   SDLoc DL(N);
11910   SDValue N0 = N->getOperand(0);
11911   SDValue Zero = DAG.getConstant(0, DL, VT);
11912   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11913 
11914   // Add (N0 < 0) ? Pow2 - 1 : 0;
11915   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11916   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11917   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11918 
11919   Created.push_back(Cmp.getNode());
11920   Created.push_back(Add.getNode());
11921   Created.push_back(Sel.getNode());
11922 
11923   // Divide by pow2.
11924   SDValue SRA =
11925       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11926 
11927   // If we're dividing by a positive value, we're done.  Otherwise, we must
11928   // negate the result.
11929   if (Divisor.isNonNegative())
11930     return SRA;
11931 
11932   Created.push_back(SRA.getNode());
11933   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11934 }
11935 
11936 #define GET_REGISTER_MATCHER
11937 #include "RISCVGenAsmMatcher.inc"
11938 
11939 Register
11940 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11941                                        const MachineFunction &MF) const {
11942   Register Reg = MatchRegisterAltName(RegName);
11943   if (Reg == RISCV::NoRegister)
11944     Reg = MatchRegisterName(RegName);
11945   if (Reg == RISCV::NoRegister)
11946     report_fatal_error(
11947         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11948   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11949   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11950     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11951                              StringRef(RegName) + "\"."));
11952   return Reg;
11953 }
11954 
11955 namespace llvm {
11956 namespace RISCVVIntrinsicsTable {
11957 
11958 #define GET_RISCVVIntrinsicsTable_IMPL
11959 #include "RISCVGenSearchableTables.inc"
11960 
11961 } // namespace RISCVVIntrinsicsTable
11962 
11963 } // namespace llvm
11964