//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

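  // "nxvNiM"/"nxvNfM" are scalable ("n x") vector types of N elements of
  // iM/fM; the tables below enumerate the element counts RVV supports for
  // each element width.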
  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasVInstructions()) {
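    // Map a scalable VT to a vector register group by its known minimum size:
    // up to 64 bits fits in a single vector register (VR); 128, 256 and 512
    // bits need register groups of 2, 4 and 8 (VRM2/VRM4/VRM8).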
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;
      addRegClassForRVV(VT);
    }

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF32())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF64())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

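  // X2 is sp, the ABI-mandated stack pointer.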
  setStackPointerRegisterToSaveRestore(RISCV::X2);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
                   MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);

  if (!Subtarget.hasStdExtZbb())
    setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

    setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
                       MVT::i32, Custom);

    setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
                       MVT::i32, Custom);
  } else {
    setLibcallName(
        {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
        nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
                        ISD::SREM, ISD::UREM},
                       XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);

      setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
                         {MVT::i8, MVT::i16, MVT::i32}, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(
      {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
      Expand);

  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
                     Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
      Subtarget.hasStdExtZbkb()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
  } else {
    setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);

    // BSWAP i8 doesn't exist (swapping a single byte is a no-op), so only
    // BITREVERSE needs handling at i8.
    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);

    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
                           ? Legal
                           : Expand);
    // Zbkb can use rev8+brev8 to implement bitreverse.
    setOperationAction(ISD::BITREVERSE, XLenVT,
                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
                       Legal);

    if (Subtarget.is64Bit())
      setOperationAction(
          {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
          MVT::i32, Custom);
  } else {
    setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);

    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  static constexpr ISD::NodeType FPLegalNodeTypes[] = {
      ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
      ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
      ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
      ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
      ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
      ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};

  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  static const ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);

    setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
                        ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
                        ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
                        ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                        ISD::FLOG2, ISD::FLOG10},
                       MVT::f16, Promote);

    // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
    // complete support for all operations in LegalizeDAG.

    // We need to custom promote this.
    if (Subtarget.is64Bit())
      setOperationAction(ISD::FPOWI, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f32, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit())
    setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
                        ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
                       MVT::i32, Custom);

  if (Subtarget.hasStdExtF()) {
    setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
                       Custom);

    setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
                        ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
                       XLenVT, Legal);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
                      ISD::JumpTable},
                     XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
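    // The A extension provides LR/SC and AMOs at word (and, on RV64,
    // doubleword) width, so cmpxchg narrower than 32 bits must be expanded to
    // a masked form.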
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

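  // Integer setcc results are 0 or 1, matching what slt/sltu produce.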
  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasVInstructions()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                       {MVT::i8, MVT::i16}, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    else
      setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                         MVT::i64, Custom);

    setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                       MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
        ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
        ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
        ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
        ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
        ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
        ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
        ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD,        ISD::VP_FSUB,
        ISD::VP_FMUL,        ISD::VP_FDIV,
        ISD::VP_FNEG,        ISD::VP_FMA,
        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
        ISD::VP_MERGE,       ISD::VP_SELECT,
        ISD::VP_SITOFP,      ISD::VP_UITOFP,
        ISD::VP_SETCC,       ISD::VP_FP_ROUND,
        ISD::VP_FP_EXTEND};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                         MVT::i64, Custom);

      setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
                          ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
                          ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
                          ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
                         MVT::i64, Custom);

      setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
                          ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
                          ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                          ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
                         MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes.
      setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
                          ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
                         VT, Custom);

      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(
          {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
          Expand);

      setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);

      setOperationAction(
          {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
          Custom);

      setOperationAction(
          {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
          Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      setOperationAction(
          {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
          Custom);
    }

    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      // Vectors implement MULHS/MULHU.
      setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);

      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
        setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);

      setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                         Legal);

      setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);

      setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
                         Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
                         VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);

      setOperationAction(
          {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
                          ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
                          ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
                          ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
                         VT, Custom);

      setOperationAction(IntegerVPOps, VT, Custom);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      // Splice
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
      // type that can represent the value exactly.
      if (VT.getVectorElementType() != MVT::i64) {
        MVT FloatEltVT =
            VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
        EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
        if (isTypeLegal(FloatVT)) {
          setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                             Custom);
        }
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);
      // Expand various condition codes (explained above).
      setCondCodeAction(VFPCCToExpand, VT, Expand);

      setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);

      setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                         VT, Custom);

      setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
                          ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
                         VT, Custom);

      // Expand FP operations that need libcalls.
      setOperationAction({ISD::FREM, ISD::FPOW, ISD::FCOS, ISD::FSIN,
                          ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                          ISD::FLOG2, ISD::FLOG10, ISD::FRINT,
                          ISD::FNEARBYINT},
                         VT, Expand);

      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);

      setOperationAction(FloatingPointVPOps, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasVInstructionsF32())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasVInstructionsF64())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
                           OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
                           Custom);

        setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(
            {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
            Custom);

        setOperationAction(
            {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
            Custom);

        setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
                            ISD::FP_TO_UINT},
                           VT, Custom);

        // The operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
                              ISD::OR, ISD::XOR},
                             VT, Custom);

          setOperationAction(
              {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
              VT, Custom);
          continue;
        }

        // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
        // it before type legalization for i64 vectors on RV32. It will then be
        // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(
            {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
                            ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
                            ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
                           VT, Custom);

        setOperationAction(
            {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);

        // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
        if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
          setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);

        setOperationAction(
            {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
            Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(
            {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
                            ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
                            ISD::VECREDUCE_UMIN},
                           VT, Custom);

        setOperationAction(IntegerVPOps, VT, Custom);

        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
        // type that can represent the value exactly.
        if (VT.getVectorElementType() != MVT::i64) {
          MVT FloatEltVT =
              VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
          EVT FloatVT =
              MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
          if (isTypeLegal(FloatVT))
            setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                               Custom);
        }
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
                            ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
                            ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
                            ISD::MGATHER, ISD::MSCATTER},
                           VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                            ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
                           VT, Custom);

        setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);

        setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                           VT, Custom);

        setCondCodeAction(VFPCCToExpand, VT, Expand);

        setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
                            ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
                           VT, Custom);

        setOperationAction(FloatingPointVPOps, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
                         Custom);
      if (Subtarget.hasStdExtZfh())
        setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      if (Subtarget.hasStdExtF())
        setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      if (Subtarget.hasStdExtD())
        setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
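  // With the compressed (C) extension, instructions may be 2-byte aligned;
  // otherwise all instructions are 4 bytes.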
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

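  // Require more entries than the generic default (four) before emitting a
  // jump table.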
  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
                       ISD::OR, ISD::XOR});

  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});

  if (Subtarget.hasStdExtZbp())
    setTargetDAGCombine({ISD::ROTL, ISD::ROTR});

  if (Subtarget.hasStdExtZbb())
    setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});

  if (Subtarget.hasStdExtZbkb())
    setTargetDAGCombine(ISD::BITREVERSE);
  if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
    setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
                         ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
  if (Subtarget.hasVInstructions())
    setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
                         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                         ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});

  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasVInstructions() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::riscv_masked_strided_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT = getValueType(DL, I.getType()->getScalarType());
    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::riscv_masked_strided_store:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT =
        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
    Info.align = Align(
        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
        8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOStore;
    return true;
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT =
        getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
    Info.align =
        Align(DL.getTypeSizeInBits(
                  I.getType()->getStructElementType(0)->getScalarType()) /
              8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // RVV instructions only support register addressing.
  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  // Don't advertise i32->i64 zextload as being free for RV64. It interacts
  // poorly with type legalization of compares preferring sext.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
  return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  // FIXME: Support vectors once we have tests.
  if (VT.isVector())
    return false;

  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
          Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);
}

bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
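  // ANDI takes a 12-bit signed immediate, so the mask (1 << Y) is only
  // encodable for bit positions 0 through 10.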
  auto *C = dyn_cast<ConstantSDNode>(Y);
  return C && C->getAPIntValue().ule(10);
}

bool RISCVTargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {
  // One interesting pattern that we'd want to form is 'bit extract':
  //   ((1 >> Y) & 1) ==/!= 0
  // But we also need to be careful not to try to reverse that fold.

  // Is this '((1 >> Y) & 1)'?
  if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
    return false; // Keep the 'bit extract' pattern.

  // Will this be '((1 >> Y) & 1)' after the transform?
  if (NewShiftOpcode == ISD::SRL && CC->isOne())
    return true; // Do form the 'bit extract' pattern.

  // If 'X' is a constant, and we transform, then we will immediately
  // try to undo the fold, thus causing endless combine loop.
  // So only do the transform if X is not a constant. This matches the default
  // implementation of this function.
  return !XC;
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
    return false;

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
        case Intrinsic::vp_fma:
          return Operand == 0 || Operand == 1;
        // FIXME: Our patterns can only match vx/vf instructions when the splat
        // is on the RHS, because TableGen doesn't recognize our VP operations
        // as commutative.
1199         case Intrinsic::vp_add:
1200         case Intrinsic::vp_mul:
1201         case Intrinsic::vp_and:
1202         case Intrinsic::vp_or:
1203         case Intrinsic::vp_xor:
1204         case Intrinsic::vp_fadd:
1205         case Intrinsic::vp_fmul:
1206         case Intrinsic::vp_shl:
1207         case Intrinsic::vp_lshr:
1208         case Intrinsic::vp_ashr:
1209         case Intrinsic::vp_udiv:
1210         case Intrinsic::vp_sdiv:
1211         case Intrinsic::vp_urem:
1212         case Intrinsic::vp_srem:
1213           return Operand == 1;
1214         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1215         // explicit patterns for both LHS and RHS (as 'vr' versions).
1216         case Intrinsic::vp_sub:
1217         case Intrinsic::vp_fsub:
1218         case Intrinsic::vp_fdiv:
1219           return Operand == 0 || Operand == 1;
1220         default:
1221           return false;
1222         }
1223       }
1224       return false;
1225     default:
1226       return false;
1227     }
1228   };
1229 
1230   for (auto OpIdx : enumerate(I->operands())) {
1231     if (!IsSinker(I, OpIdx.index()))
1232       continue;
1233 
1234     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1235     // Make sure we are not already sinking this operand
1236     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1237       continue;
1238 
1239     // We are looking for a splat that can be sunk.
1240     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1241                              m_Undef(), m_ZeroMask())))
1242       continue;
1243 
1244     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1245     // and vector registers
1246     for (Use &U : Op->uses()) {
1247       Instruction *Insn = cast<Instruction>(U.getUser());
1248       if (!IsSinker(Insn, U.getOperandNo()))
1249         return false;
1250     }
1251 
1252     Ops.push_back(&Op->getOperandUse(0));
1253     Ops.push_back(&OpIdx.value());
1254   }
1255   return true;
1256 }
1257 
1258 bool RISCVTargetLowering::isOffsetFoldingLegal(
1259     const GlobalAddressSDNode *GA) const {
1260   // In order to maximise the opportunity for common subexpression elimination,
1261   // keep a separate ADD node for the global address offset instead of folding
1262   // it in the global address node. Later peephole optimisations may choose to
1263   // fold it back in when profitable.
1264   return false;
1265 }
1266 
1267 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1268                                        bool ForCodeSize) const {
1269   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1270   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1271     return false;
1272   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1273     return false;
1274   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1275     return false;
1276   return Imm.isZero();
1277 }
1278 
1279 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1280   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1281          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1282          (VT == MVT::f64 && Subtarget.hasStdExtD());
1283 }
1284 
1285 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1286                                                       CallingConv::ID CC,
1287                                                       EVT VT) const {
1288   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1289   // We might still end up using a GPR but that will be decided based on ABI.
1290   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1291   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1292     return MVT::f32;
1293 
1294   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1295 }
1296 
1297 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1298                                                            CallingConv::ID CC,
1299                                                            EVT VT) const {
1300   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1301   // We might still end up using a GPR but that will be decided based on ABI.
1302   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1303   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1304     return 1;
1305 
1306   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1307 }
1308 
1309 // Changes the condition code and swaps operands if necessary, so the SetCC
1310 // operation matches one of the comparisons supported directly by branches
1311 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1312 // with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
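  // Mask vectors are scaled as if each i1 element occupied a byte, mapping an
  // i1 vector to the LMUL of the i8 vector with the same element count (e.g.
  // nxv8i1 maps to LMUL_1, like nxv8i8).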
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVII::VLMUL::LMUL_F8;
  case 16:
    return RISCVII::VLMUL::LMUL_F4;
  case 32:
    return RISCVII::VLMUL::LMUL_F2;
  case 64:
    return RISCVII::VLMUL::LMUL_1;
  case 128:
    return RISCVII::VLMUL::LMUL_2;
  case 256:
    return RISCVII::VLMUL::LMUL_4;
  case 512:
    return RISCVII::VLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
      LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 ||
      LMUL == RISCVII::VLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
// stores for those types.
bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
  return !Subtarget.useRVVForFixedLengthVectors() ||
         (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
}

bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
      ScalarTy->isIntegerTy(32))
    return true;

  if (ScalarTy->isIntegerTy(64))
    return Subtarget.hasVInstructionsI64();

  if (ScalarTy->isHalfTy())
    return Subtarget.hasVInstructionsF16();
  if (ScalarTy->isFloatTy())
    return Subtarget.hasVInstructionsF32();
  if (ScalarTy->isDoubleTy())
    return Subtarget.hasVInstructionsF64();

  return false;
}

static SDValue getVLOperand(SDValue Op) {
  assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
         "Unexpected opcode");
  bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
  const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
  if (!II)
    return SDValue();
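  // VLOperand in the table is relative to the intrinsic's own argument list,
  // so skip the optional chain and the intrinsic ID to index the SDNode's
  // operands.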
  return Op.getOperand(II->VLOperand + 1 + HasChain);
}

static bool useRVVForFixedLengthVectorVT(MVT VT,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  // We only support a set of vector types with a consistent maximum fixed size
  // across all supported vector element types to avoid legalization issues.
  // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
  // fixed-length vector type we support is 1024 bytes.
  if (VT.getFixedSizeInBits() > 1024 * 8)
    return false;

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  MVT EltVT = VT.getVectorElementType();

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (EltVT.SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > MinVLen)
      return false;
    MinVLen /= 8;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    break;
  case MVT::i64:
    if (!Subtarget.hasVInstructionsI64())
      return false;
    break;
  case MVT::f16:
    if (!Subtarget.hasVInstructionsF16())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasVInstructionsF32())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasVInstructionsF64())
      return false;
    break;
  }

  // Reject elements larger than ELEN.
  if (EltVT.getSizeInBits() > Subtarget.getELEN())
    return false;

  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
  unsigned MaxELen = Subtarget.getELEN();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN-sized types. Use fractional LMULs for
    // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
    // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
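    // For example, with MinVLen=128: v8i16 (128 bits) maps to nxv4i16, an
    // LMUL=1 container filling exactly one register; v4i16 maps to nxv2i16,
    // an LMUL=1/2 container; and v1i8 is clamped up to nxv1i8 (LMUL=1/8),
    // the smallest fractional LMUL when ELEN=64.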
    unsigned NumElts =
        (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
    NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
    assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

/// Return the mask type suitable for masking the provided vector type. This
/// is simply an i1 element type vector of the same (possibly scalable)
/// length.
static MVT getMaskTypeFor(EVT VecVT) {
  assert(VecVT.isVector());
  ElementCount EC = VecVT.getVectorElementCount();
  return MVT::getVectorVT(MVT::i1, EC);
}

/// Create an all-ones mask suitable for masking a vector of type VecVT with
/// vector length VL.
static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
                              SelectionDAG &DAG) {
  MVT MaskVT = getMaskTypeFor(VecVT);
  return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
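// For fixed-length vectors the VL is the exact element count; for scalable
// vectors, passing X0 as the VL operand encodes VLMAX.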
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
  SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
  return {Mask, VL};
}

// As above but assuming the given type is a scalable vector type.
static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {
  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
}

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that only very
// limited forms of either are (currently) supported. This can get us into an
// infinite loop where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a
// BUILD_VECTOR as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  return false;
}

static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
  // RISCV FP-to-int conversions saturate to the destination register size, but
  // don't produce 0 for nan. We can use a conversion instruction and fix the
  // nan case with a compare and a select.
  SDValue Src = Op.getOperand(0);

  EVT DstVT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
  unsigned Opc;
  if (SatVT == DstVT)
    Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
    Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
  else
    return SDValue();
  // FIXME: Support other SatVTs by clamping before or after the conversion.

  SDLoc DL(Op);
  SDValue FpToInt = DAG.getNode(
      Opc, DL, DstVT, Src,
      DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));

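  // The conversion saturates for all ordered inputs; Src is unordered with
  // itself only when it is nan, in which case we select 0 instead.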
  SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
  return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
}

// Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
// and back. Taking care to avoid converting values that are nan or already
// correct.
// TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
// have FRM dependencies modeled yet.
static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type");

  SDLoc DL(Op);

  // Freeze the source since we are increasing the number of uses.
  SDValue Src = DAG.getFreeze(Op.getOperand(0));

  // Truncate to integer and convert back to FP.
  MVT IntVT = VT.changeVectorElementTypeToInteger();
  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
  Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);

  MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());

  if (Op.getOpcode() == ISD::FCEIL) {
    // If the truncated value is greater than or equal to the original
    // value, we've computed the ceil. Otherwise, we went the wrong way and
    // need to increase by 1.
    // FIXME: This should use a masked operation. Handle here or in isel?
    SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
                                 DAG.getConstantFP(1.0, DL, VT));
    SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
    Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
  } else if (Op.getOpcode() == ISD::FFLOOR) {
    // If the truncated value is less than or equal to the original value,
    // we've computed the floor. Otherwise, we went the wrong way and need to
    // decrease by 1.
    // FIXME: This should use a masked operation. Handle here or in isel?
    SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
                                 DAG.getConstantFP(1.0, DL, VT));
    SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
    Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
  }

  // Restore the original sign so that -0.0 is preserved.
  Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);

  // Determine the largest integer that can be represented exactly. This and
  // values larger than it don't have any fractional bits so don't need to
  // be converted.
  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
  unsigned Precision = APFloat::semanticsPrecision(FltSem);
  APFloat MaxVal = APFloat(FltSem);
  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);

  // If abs(Src) was larger than MaxVal or nan, keep it.
  SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
  return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
}

// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
// This mode isn't supported in vector hardware on RISCV. But as long as we
// aren't compiling with trapping math, we can emulate this with
// floor(X + copysign(nextafter(0.5, 0.0), X)).
// FIXME: Could be shorter by changing rounding mode, but we don't have FRM
// dependencies modeled yet.
// FIXME: Use masked operations to avoid final merge.
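// The predecessor of 0.5 is used rather than 0.5 itself: adding an exact 0.5
// to the largest representable value below 0.5 would round up to 1.0 and
// truncate to the wrong result.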
static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type");

  SDLoc DL(Op);

  // Freeze the source since we are increasing the number of uses.
  SDValue Src = DAG.getFreeze(Op.getOperand(0));

  // We do the conversion on the absolute value and fix the sign at the end.
  SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);

  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
  bool Ignored;
  APFloat Point5Pred = APFloat(0.5f);
  Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
  Point5Pred.next(/*nextDown*/ true);

  // Add the adjustment.
  SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
                               DAG.getConstantFP(Point5Pred, DL, VT));

  // Truncate to integer and convert back to fp.
  MVT IntVT = VT.changeVectorElementTypeToInteger();
  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
  Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);

  // Restore the original sign.
  Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);

  // Determine the largest integer that can be represented exactly. This and
  // values larger than it don't have any fractional bits so don't need to
  // be converted.
  unsigned Precision = APFloat::semanticsPrecision(FltSem);
  APFloat MaxVal = APFloat(FltSem);
  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);

  // If abs(Src) was larger than MaxVal or nan, keep it.
  MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
  return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
}

struct VIDSequence {
  int64_t StepNumerator;
  unsigned StepDenominator;
  int64_t Addend;
};

// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
// The step S is represented as an integer numerator divided by a positive
// denominator. Note that the implementation currently only identifies
// sequences in which either the numerator is +/- 1 or the denominator is 1. It
// cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; however, it is left to the caller to
// determine whether this is worth generating code for.
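// For example, <1,3,5,7> matches with StepNumerator=2, StepDenominator=1 and
// Addend=1, while <0,0,1,1> matches with StepNumerator=1, StepDenominator=2
// and Addend=0.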
static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
  unsigned NumElts = Op.getNumOperands();
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
  if (!Op.getValueType().isInteger())
    return None;

  Optional<unsigned> SeqStepDenom;
  Optional<int64_t> SeqStepNum, SeqAddend;
  Optional<std::pair<uint64_t, unsigned>> PrevElt;
  unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    // Assume undef elements match the sequence; we just have to be careful
    // when interpolating across them.
    if (Op.getOperand(Idx).isUndef())
      continue;
    // The BUILD_VECTOR must be all constants.
    if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
      return None;

    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);

    if (PrevElt) {
      // Calculate the step since the last non-undef element, and ensure
      // it's consistent across the entire sequence.
      unsigned IdxDiff = Idx - PrevElt->second;
      int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);

      // A zero value difference means that we're somewhere in the middle
      // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
      // step change before evaluating the sequence.
      if (ValDiff == 0)
        continue;

      int64_t Remainder = ValDiff % IdxDiff;
      // Normalize the step if it's greater than 1.
      if (Remainder != ValDiff) {
        // The difference must cleanly divide the element span.
        if (Remainder != 0)
          return None;
        ValDiff /= IdxDiff;
        IdxDiff = 1;
      }

      if (!SeqStepNum)
        SeqStepNum = ValDiff;
      else if (ValDiff != SeqStepNum)
        return None;

      if (!SeqStepDenom)
        SeqStepDenom = IdxDiff;
      else if (IdxDiff != *SeqStepDenom)
        return None;
    }

    // Record this non-undef element for later.
    if (!PrevElt || PrevElt->first != Val)
      PrevElt = std::make_pair(Val, Idx);
  }

  // We need to have logged a step for this to count as a legal index sequence.
  if (!SeqStepNum || !SeqStepDenom)
    return None;

  // Loop back through the sequence and validate elements we might have skipped
  // while waiting for a valid step. While doing this, log any sequence addend.
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    if (Op.getOperand(Idx).isUndef())
      continue;
    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);
    uint64_t ExpectedVal =
        (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
    int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
    if (!SeqAddend)
      SeqAddend = Addend;
    else if (Addend != SeqAddend)
      return None;
  }

  assert(SeqAddend && "Must have an addend if we have a step");

  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
}

// Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
// and lower it as a VRGATHER_VX_VL from the source vector.
static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
  if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = SplatVal.getOperand(0);
  // Only perform this optimization on vectors of the same size for simplicity.
  if (Vec.getValueType() != VT)
    return SDValue();
  SDValue Idx = SplatVal.getOperand(1);
  // The index must be a legal type.
  if (Idx.getValueType() != Subtarget.getXLenVT())
    return SDValue();

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
                               Idx, Mask, VL);

  if (!VT.isFixedLengthVector())
    return Gather;

  return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  MVT XLenVT = Subtarget.getXLenVT();
  unsigned NumElts = Op.getNumOperands();

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    // Lower constant mask BUILD_VECTORs via an integer vector type, in
    // scalar integer chunks whose bit-width depends on the number of mask
    // bits and XLEN.
    // First, determine the most appropriate scalar integer type to use. This
    // is at most XLenVT, but may be shrunk to a smaller vector element type
    // according to the size of the final vector - use i8 chunks rather than
    // XLenVT if we're producing a v8i1. This results in more consistent
    // codegen across RV32 and RV64.
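    // For example, with ELEN >= 16 a v16i1 constant mask is built as a single
    // i16 element in a v1i16, which is bitcast back to v16i1 at the end.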
    unsigned NumViaIntegerBits =
        std::min(std::max(NumElts, 8u), Subtarget.getXLen());
    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. We can use a load from a constant pool in this case.
      if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
        return SDValue();
      // Now we can create our integer vector type. Note that it may be larger
      // than the resulting mask type: v4i1 would use v1i8 as its integer type.
      MVT IntegerViaVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
                           divideCeil(NumElts, NumViaIntegerBits));

      uint64_t Bits = 0;
      unsigned BitPos = 0, IntegerEltIdx = 0;
      SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);

      for (unsigned I = 0; I < NumElts; I++, BitPos++) {
        // Once we accumulate enough bits to fill our scalar type, insert into
        // our vector and clear our accumulated data.
        if (I != 0 && I % NumViaIntegerBits == 0) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64<32>(Bits);
          SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
          Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
                            Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
          Bits = 0;
          BitPos = 0;
          IntegerEltIdx++;
        }
        SDValue V = Op.getOperand(I);
        bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
        Bits |= ((uint64_t)BitValue << BitPos);
      }

      // Insert the (remaining) scalar value into position in our integer
      // vector type.
      if (NumViaIntegerBits <= 32)
        Bits = SignExtend64<32>(Bits);
      SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
                        DAG.getConstant(IntegerEltIdx, DL, XLenVT));

      if (NumElts < NumViaIntegerBits) {
        // If we're producing a smaller vector than our minimum legal integer
        // type, bitcast to the equivalent (known-legal) mask type, and extract
        // our final mask.
        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
        Vec = DAG.getBitcast(MVT::v8i1, Vec);
        Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
                          DAG.getConstant(0, DL, XLenVT));
      } else {
        // Else we must have produced an integer type with the same size as the
        // mask type; bitcast for the final result.
        assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
        Vec = DAG.getBitcast(VT, Vec);
      }

      return Vec;
    }

    // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
    // vector type, we have a legal equivalently-sized i8 type, so we can use
    // that.
    MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
    SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);

    SDValue WideVec;
    if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
      // For a splat, perform a scalar truncate before creating the wider
      // vector.
      assert(Splat.getValueType() == XLenVT &&
             "Unexpected type for i1 splat value");
      Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
                          DAG.getConstant(1, DL, XLenVT));
      WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
    } else {
      SmallVector<SDValue, 8> Ops(Op->op_values());
      WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
      SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
      WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
    }

    return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
      return Gather;
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat =
        DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match index sequences, which we can lower to the vid instruction
  // with optional modifications. An all-undef vector is matched by
  // getSplatValue, above.
  if (auto SimpleVID = isSimpleVIDSequence(Op)) {
    int64_t StepNumerator = SimpleVID->StepNumerator;
    unsigned StepDenominator = SimpleVID->StepDenominator;
    int64_t Addend = SimpleVID->Addend;

    assert(StepNumerator != 0 && "Invalid step");
    bool Negate = false;
    int64_t SplatStepVal = StepNumerator;
    unsigned StepOpcode = ISD::MUL;
    if (StepNumerator != 1) {
      if (isPowerOf2_64(std::abs(StepNumerator))) {
        Negate = StepNumerator < 0;
        StepOpcode = ISD::SHL;
        SplatStepVal = Log2_64(std::abs(StepNumerator));
      }
    }

    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
    // There is no vmul.vi instruction so ensure the multiply constant can fit
    // in a single addi instruction.
    if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
         (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
        isPowerOf2_32(StepDenominator) &&
        (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      // Convert right out of the scalable type so we can use standard ISD
      // nodes for the rest of the computation. If we used scalable types with
      // these, we'd lose the fixed-length vector info and generate worse
      // vsetvli code.
      VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
      if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
          (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
        SDValue SplatStep = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
        VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
      }
      if (StepDenominator != 1) {
        SDValue SplatStep = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
        VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
      }
      if (Addend != 0 || Negate) {
        SDValue SplatAddend = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID =
            DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
      }
      return VID;
    }
  }

  // Attempt to detect "hidden" splats, which only reveal themselves as splats
  // when re-interpreted as a vector with a larger element type. For example,
  //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splat as
  //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
  // TODO: This optimization could also work on non-constant splats, but it
  // would require bit-manipulation instructions to construct the splat value.
  SmallVector<SDValue> Sequence;
  unsigned EltBitSize = VT.getScalarSizeInBits();
  const auto *BV = cast<BuildVectorSDNode>(Op);
  if (VT.isInteger() && EltBitSize < 64 &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
      BV->getRepeatedSequence(Sequence) &&
      (Sequence.size() * EltBitSize) <= 64) {
    unsigned SeqLen = Sequence.size();
    MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
    MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
            ViaIntVT == MVT::i64) &&
           "Unexpected sequence type");

    unsigned EltIdx = 0;
    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
    uint64_t SplatValue = 0;
    // Construct the amalgamated value which can be splatted as this larger
    // vector type.
    for (const auto &SeqV : Sequence) {
      if (!SeqV.isUndef())
        SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
                       << (EltIdx * EltBitSize));
      EltIdx++;
    }

    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
    if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
      SplatValue = SignExtend64<32>(SplatValue);

    // Since we can't introduce illegal i64 types at this stage, we can only
    // perform an i64 splat on RV32 if it is its own sign-extended value. That
    // way we can use RVV instructions to splat.
    assert((ViaIntVT.bitsLE(XLenVT) ||
            (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
           "Unexpected bitcast sequence");
    if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
      SDValue ViaVL =
          DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
      MVT ViaContainerVT =
          getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
      SDValue Splat =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
                      DAG.getUNDEF(ViaContainerVT),
                      DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
      Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
      return DAG.getBitcast(VT, Splat);
    }
  }

  // Try and optimize BUILD_VECTORs with "dominant values" - these are values
  // which constitute a large proportion of the elements. In such cases we can
  // splat a vector with the dominant element and make up the shortfall with
  // INSERT_VECTOR_ELTs.
  // Note that this includes vectors of 2 elements by association. The
  // upper-most element is the "dominant" one, allowing us to use a splat to
  // "insert" the upper element, and an insert of the lower element at position
  // 0, which improves codegen.
  SDValue DominantValue;
  unsigned MostCommonCount = 0;
  DenseMap<SDValue, unsigned> ValueCounts;
  unsigned NumUndefElts =
      count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });

  // Track the number of scalar loads we know we'd be inserting, estimated as
  // any non-zero floating-point constant. Other kinds of element are either
  // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materializations and
  // vector-insertion instructions is not known.
  unsigned NumScalarLoads = 0;

  for (SDValue V : Op->op_values()) {
    if (V.isUndef())
      continue;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
      NumScalarLoads += !CFP->isExactlyValue(+0.0);

    // Is this value dominant? In case of a tie, prefer the highest element as
    // it's cheaper to insert near the beginning of a vector than it is at the
    // end.
    if (++Count >= MostCommonCount) {
      DominantValue = V;
      MostCommonCount = Count;
    }
  }

  assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
  unsigned NumDefElts = NumElts - NumUndefElts;
  unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;

  // Don't perform this optimization when optimizing for size, since
  // materializing elements and inserting them tends to cause code bloat.
  if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
      ((MostCommonCount > DominantValueCountThreshold) ||
       (ValueCounts.size() <= Log2_32(NumDefElts)))) {
    // Start by splatting the most common element.
    SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);

    DenseSet<SDValue> Processed{DominantValue};
    MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
    for (const auto &OpIdx : enumerate(Op->ops())) {
      const SDValue &V = OpIdx.value();
      if (V.isUndef() || !Processed.insert(V).second)
        continue;
      if (ValueCounts[V] == 1) {
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
                          DAG.getConstant(OpIdx.index(), DL, XLenVT));
      } else {
        // Blend in all instances of this value using a VSELECT, using a
        // mask where each bit signals whether that element is the one
        // we're after.
        SmallVector<SDValue> Ops;
        transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
          return DAG.getConstant(V == V1, DL, XLenVT);
        });
        Vec = DAG.getNode(ISD::VSELECT, DL, VT,
                          DAG.getBuildVector(SelMaskTy, DL, Ops),
                          DAG.getSplatBuildVector(VT, DL, V), Vec);
      }
    }

    return Vec;
  }

  return SDValue();
}

static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Lo, SDValue Hi, SDValue VL,
                                   SelectionDAG &DAG) {
  if (!Passthru)
    Passthru = DAG.getUNDEF(VT);
  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try and match RVV vector/scalar instructions.
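    // An arithmetic shift of the 32-bit Lo by 31 yields 0 or -1, so this
    // checks that Hi is exactly the sign-extension of Lo.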
    if ((LoC >> 31) == HiC)
      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);

    // If VL is all ones (i.e. VLMAX) and the Hi constant is equal to Lo, we
    // can use vmv.v.x with EEW=32 to lower it.
    auto *Const = dyn_cast<ConstantSDNode>(VL);
    if (LoC == HiC && Const && Const->isAllOnesValue()) {
      MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
      // TODO: if vl <= min(VLMAX), we can also do this. But we can't access
      // the subtarget from here.
      auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT,
                                  DAG.getUNDEF(InterVT), Lo,
                                  DAG.getRegister(RISCV::X0, MVT::i32));
      return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
    }
  }

  // Fall back to a stack store and stride x0 vector load.
  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
                     Hi, VL);
}

// Called by type legalization to handle splat of i64 on RV32.
// FIXME: We can optimize this when the type has sign or zero bits in one
// of the halves.
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Scalar, SDValue VL,
                                   SelectionDAG &DAG) {
  assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(1, DL, MVT::i32));
  return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
}

// This function lowers a splat of a scalar operand Splat with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
                                MVT VT, SDLoc DL, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {
  bool HasPassthru = Passthru && !Passthru.isUndef();
  if (!HasPassthru && !Passthru)
    Passthru = DAG.getUNDEF(VT);
  if (VT.isFloatingPoint()) {
    // If VL is 1, we could use vfmv.s.f.
    if (isOneConstant(VL))
      return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
    return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Simplest case is that the operand needs to be promoted to XLenVT.
  if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
    unsigned ExtOpc =
        isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
    Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
    ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
    // If VL is 1 and the scalar value won't benefit from immediate, we could
    // use vmv.s.x.
    if (isOneConstant(VL) &&
        (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
      return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
  }

  assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
         "Unexpected scalar for splat lowering!");

  if (isOneConstant(VL) && isNullConstant(Scalar))
    return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
                       DAG.getConstant(0, DL, XLenVT), VL);

  // Otherwise use the more complicated splatting algorithm.
  return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
}

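// Match a mask which interleaves the low halves of two source vectors, e.g.
// <0,8,1,9,2,10,3,11> for two v8 sources: even result elements come from one
// source and odd elements from the other, walking each low half in order.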
static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
                                const RISCVSubtarget &Subtarget) {
  // We need to be able to widen elements to the next larger integer type.
  if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
    return false;

  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  int Srcs[] = {-1, -1};
  for (int i = 0; i != Size; ++i) {
    // Ignore undef elements.
    if (Mask[i] < 0)
      continue;

    // Is this an even or odd element?
    int Pol = i % 2;

    // Ensure we consistently use the same source for this element polarity.
    int Src = Mask[i] / Size;
    if (Srcs[Pol] < 0)
      Srcs[Pol] = Src;
    if (Srcs[Pol] != Src)
      return false;

    // Make sure the element within the source is appropriate for this element
    // in the destination.
    int Elt = Mask[i] % Size;
    if (Elt != i / 2)
      return false;
  }

  // We need to find a source for each polarity and they can't be the same.
  if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
    return false;

  // Swap the sources if the second source was in the even polarity.
  SwapSources = Srcs[0] > Srcs[1];

  return true;
}

/// Match shuffles that concatenate two vectors, rotate the concatenation,
/// and then extract the original number of elements from the rotated result.
/// This is equivalent to vector.splice or X86's PALIGNR instruction. The
/// returned rotation amount is for a rotate right, where elements move from
/// higher elements to lower elements. \p LoSrc indicates the first source
/// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
/// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
/// 0 or 1 if a rotation is found.
///
/// NOTE: We talk about rotating to the right, which matches how bit shift and
/// rotate instructions are described (LSBs on the right), but LLVM IR and the
/// examples below write vectors with the lowest elements on the left.
static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
  int Size = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  LoSrc = -1;
  HiSrc = -1;
  for (int i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Determine where a rotate vector would have started.
    int StartIdx = i - (M % Size);
    // The identity rotation isn't interesting, stop.
    if (StartIdx == 0)
      return -1;

    // If we found the tail of a vector, the rotation must be the missing
    // front. If we found the head of a vector, the rotation is how much of
    // the head we found.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    int MaskSrc = M < Size ? 0 : 1;

    // Compute which of the two target values this index should be assigned to.
    // This reflects whether the high elements are remaining or the low elements
    // are remaining.
    int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (TargetSrc < 0)
      TargetSrc = MaskSrc;
    else if (TargetSrc != MaskSrc)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((LoSrc >= 0 || HiSrc >= 0) &&
         "Failed to find a rotated input vector!");

  return Rotation;
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDValue TrueMask, VL;
  std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  if (SVN->isSplat()) {
    const int Lane = SVN->getSplatIndex();
    if (Lane >= 0) {
      MVT SVT = VT.getVectorElementType();

      // Turn splatted vector load into a strided load with an X0 stride.
      SDValue V = V1;
      // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
      // with undef.
      // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
      int Offset = Lane;
      if (V.getOpcode() == ISD::CONCAT_VECTORS) {
        int OpElements =
            V.getOperand(0).getSimpleValueType().getVectorNumElements();
        V = V.getOperand(Offset / OpElements);
        Offset %= OpElements;
      }

      // We need to ensure the load isn't atomic or volatile.
      if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
        auto *Ld = cast<LoadSDNode>(V);
        Offset *= SVT.getStoreSize();
        SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
                                                   TypeSize::Fixed(Offset), DL);

        // If this is SEW=64 on RV32, use a strided load with a stride of x0.
        if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
          SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
          SDValue IntID =
              DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
          SDValue Ops[] = {Ld->getChain(),
                           IntID,
                           DAG.getUNDEF(ContainerVT),
                           NewAddr,
                           DAG.getRegister(RISCV::X0, XLenVT),
                           VL};
          SDValue NewLoad = DAG.getMemIntrinsicNode(
              ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
              DAG.getMachineFunction().getMachineMemOperand(
                  Ld->getMemOperand(), Offset, SVT.getStoreSize()));
          DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
          return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
        }

        // Otherwise use a scalar load and splat. This will give the best
        // opportunity to fold a splat into the operation. ISel can turn it into
        // the x0 strided load if we aren't able to fold away the select.
        if (SVT.isFloatingPoint())
          V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                          Ld->getPointerInfo().getWithOffset(Offset),
                          Ld->getOriginalAlign(),
                          Ld->getMemOperand()->getFlags());
        else
          V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
                             Ld->getPointerInfo().getWithOffset(Offset), SVT,
                             Ld->getOriginalAlign(),
                             Ld->getMemOperand()->getFlags());
        DAG.makeEquivalentMemoryOrdering(Ld, V);

        unsigned Opc =
            VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
        SDValue Splat =
            DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
        return convertFromScalableVector(VT, Splat, DAG, Subtarget);
      }

      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)NumElts && "Unexpected lane!");
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
      return convertFromScalableVector(VT, Gather, DAG, Subtarget);
    }
  }

  ArrayRef<int> Mask = SVN->getMask();

  // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors
  // may be undef, in which case the shuffle can be handled with a single
  // SLIDEDOWN/UP.
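  // For example, with NumElts == 4 the single-source mask <1, 2, 3, 0> is a
  // rotation by 1: slide V1 down by 1 to place elements 1..3 in lanes 0..2,
  // then slide V1 up by NumElts - 1 == 3 to place element 0 in lane 3.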
2587   int LoSrc, HiSrc;
2588   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2589   if (Rotation > 0) {
2590     SDValue LoV, HiV;
2591     if (LoSrc >= 0) {
2592       LoV = LoSrc == 0 ? V1 : V2;
2593       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2594     }
2595     if (HiSrc >= 0) {
2596       HiV = HiSrc == 0 ? V1 : V2;
2597       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2598     }
2599 
2600     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2601     // to slide LoV up by (NumElts - Rotation).
2602     unsigned InvRotate = NumElts - Rotation;
2603 
2604     SDValue Res = DAG.getUNDEF(ContainerVT);
2605     if (HiV) {
2606       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2607       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2608       // causes multiple vsetvlis in some test cases such as lowering
      // reduce.mul.
2610       SDValue DownVL = VL;
2611       if (LoV)
2612         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2613       Res =
2614           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2615                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2616     }
2617     if (LoV)
2618       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2619                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2620 
2621     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2622   }
2623 
2624   // Detect an interleave shuffle and lower to
2625   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
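  // For i8 elements this computes zext(V1) + zext(V2) + (2^8 - 1) * zext(V2)
  // == zext(V1) + 256 * zext(V2) in each i16 lane, i.e. V1 in the low byte
  // and V2 in the high byte, which is the interleave when the wide vector is
  // reinterpreted as i8.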
2626   bool SwapSources;
2627   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2628     // Swap sources if needed.
2629     if (SwapSources)
2630       std::swap(V1, V2);
2631 
2632     // Extract the lower half of the vectors.
2633     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2634     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2635                      DAG.getConstant(0, DL, XLenVT));
2636     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2637                      DAG.getConstant(0, DL, XLenVT));
2638 
2639     // Double the element width and halve the number of elements in an int type.
2640     unsigned EltBits = VT.getScalarSizeInBits();
2641     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2642     MVT WideIntVT =
2643         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2644     // Convert this to a scalable vector. We need to base this on the
2645     // destination size to ensure there's always a type with a smaller LMUL.
2646     MVT WideIntContainerVT =
2647         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2648 
2649     // Convert sources to scalable vectors with the same element count as the
2650     // larger type.
2651     MVT HalfContainerVT = MVT::getVectorVT(
2652         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2653     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2654     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2655 
2656     // Cast sources to integer.
2657     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2658     MVT IntHalfVT =
2659         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2660     V1 = DAG.getBitcast(IntHalfVT, V1);
2661     V2 = DAG.getBitcast(IntHalfVT, V2);
2662 
2663     // Freeze V2 since we use it twice and we need to be sure that the add and
2664     // multiply see the same value.
2665     V2 = DAG.getFreeze(V2);
2666 
2667     // Recreate TrueMask using the widened type's element count.
2668     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2669 
2670     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2671     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2672                               V2, TrueMask, VL);
2673     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2674     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2675                                      DAG.getUNDEF(IntHalfVT),
2676                                      DAG.getAllOnesConstant(DL, XLenVT));
2677     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2678                                    V2, Multiplier, TrueMask, VL);
2679     // Add the new copies to our previous addition giving us 2^eltbits copies of
2680     // V2. This is equivalent to shifting V2 left by eltbits. This should
2681     // combine with the vwmulu.vv above to form vwmaccu.vv.
2682     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2683                       TrueMask, VL);
2684     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2685     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2686     // vector VT.
2687     ContainerVT =
2688         MVT::getVectorVT(VT.getVectorElementType(),
2689                          WideIntContainerVT.getVectorElementCount() * 2);
2690     Add = DAG.getBitcast(ContainerVT, Add);
2691     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2692   }
2693 
  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
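  // For example, with NumElts == 4 the mask <0, 5, 2, 7> is a select: lanes 0
  // and 2 come from V1 while lanes 1 and 3 come from the same lanes of V2
  // (5 % 4 == 1 and 7 % 4 == 3).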
2697   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2698     int MaskIndex = MaskIdx.value();
2699     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2700   });
2701 
2702   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2703 
2704   SmallVector<SDValue> MaskVals;
2705   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2706   // merged with a second vrgather.
2707   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2708 
2709   // By default we preserve the original operand order, and use a mask to
2710   // select LHS as true and RHS as false. However, since RVV vector selects may
2711   // feature splats but only on the LHS, we may choose to invert our mask and
2712   // instead select between RHS and LHS.
2713   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2714   bool InvertMask = IsSelect == SwapOps;
2715 
  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
  // half.
2718   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2719 
2720   // Now construct the mask that will be used by the vselect or blended
2721   // vrgather operation. For vrgathers, construct the appropriate indices into
2722   // each vector.
2723   for (int MaskIndex : Mask) {
2724     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2725     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2726     if (!IsSelect) {
2727       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2728       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2729                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2730                                      : DAG.getUNDEF(XLenVT));
2731       GatherIndicesRHS.push_back(
2732           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2733                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2734       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2735         ++LHSIndexCounts[MaskIndex];
2736       if (!IsLHSOrUndefIndex)
2737         ++RHSIndexCounts[MaskIndex - NumElts];
2738     }
2739   }
2740 
2741   if (SwapOps) {
2742     std::swap(V1, V2);
2743     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2744   }
2745 
2746   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2747   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2748   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2749 
2750   if (IsSelect)
2751     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2752 
2753   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2754     // On such a large vector we're unable to use i8 as the index type.
2755     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2756     // may involve vector splitting if we're already at LMUL=8, or our
2757     // user-supplied maximum fixed-length LMUL.
2758     return SDValue();
2759   }
2760 
2761   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2762   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2763   MVT IndexVT = VT.changeTypeToInteger();
2764   // Since we can't introduce illegal index types at this stage, use i16 and
2765   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2766   // than XLenVT.
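  // For example, an i64-element shuffle on RV32 would otherwise need i64
  // indices; vrgatherei16 with i16 indices avoids introducing that illegal
  // index type.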
2767   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2768     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2769     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2770   }
2771 
2772   MVT IndexContainerVT =
2773       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2774 
2775   SDValue Gather;
2776   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2777   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2778   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2779     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2780                               Subtarget);
2781   } else {
2782     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2783     // If only one index is used, we can use a "splat" vrgather.
2784     // TODO: We can splat the most-common index and fix-up any stragglers, if
2785     // that's beneficial.
2786     if (LHSIndexCounts.size() == 1) {
2787       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2788       Gather =
2789           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2790                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2791     } else {
2792       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2793       LHSIndices =
2794           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2795 
2796       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2797                            TrueMask, VL);
2798     }
2799   }
2800 
2801   // If a second vector operand is used by this shuffle, blend it in with an
2802   // additional vrgather.
2803   if (!V2.isUndef()) {
2804     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2805     // If only one index is used, we can use a "splat" vrgather.
2806     // TODO: We can splat the most-common index and fix-up any stragglers, if
2807     // that's beneficial.
2808     if (RHSIndexCounts.size() == 1) {
2809       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2810       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2811                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2812     } else {
2813       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2814       RHSIndices =
2815           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2816       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2817                        VL);
2818     }
2819 
2820     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2821     SelectMask =
2822         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2823 
2824     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2825                          Gather, VL);
2826   }
2827 
2828   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2829 }
2830 
2831 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2832   // Support splats for any type. These should type legalize well.
2833   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2834     return true;
2835 
2836   // Only support legal VTs for other shuffles for now.
2837   if (!isTypeLegal(VT))
2838     return false;
2839 
2840   MVT SVT = VT.getSimpleVT();
2841 
2842   bool SwapSources;
2843   int LoSrc, HiSrc;
2844   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2845          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2846 }
2847 
2848 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2849 // the exponent.
2850 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2851   MVT VT = Op.getSimpleValueType();
2852   unsigned EltSize = VT.getScalarSizeInBits();
2853   SDValue Src = Op.getOperand(0);
2854   SDLoc DL(Op);
2855 
  // We need an FP type that can represent the value.
2857   // TODO: Use f16 for i8 when possible?
2858   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2859   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2860 
2861   // Legal types should have been checked in the RISCVTargetLowering
2862   // constructor.
2863   // TODO: Splitting may make sense in some cases.
2864   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2865          "Expected legal float type!");
2866 
2867   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2868   // The trailing zero count is equal to log2 of this single bit value.
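  // For example, X == 0b0110100 gives X & -X == 0b0000100, and
  // log2(0b0000100) == 2 == cttz(X).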
2869   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2870     SDValue Neg =
2871         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2872     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2873   }
2874 
2875   // We have a legal FP type, convert to it.
2876   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2877   // Bitcast to integer and shift the exponent to the LSB.
2878   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2879   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2880   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2881   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2882                               DAG.getConstant(ShiftAmt, DL, IntVT));
2883   // Truncate back to original type to allow vnsrl.
2884   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2885   // The exponent contains log2 of the value in biased form.
2886   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2887 
2888   // For trailing zeros, we just need to subtract the bias.
2889   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2890     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2891                        DAG.getConstant(ExponentBias, DL, VT));
2892 
2893   // For leading zeros, we need to remove the bias and convert from log2 to
2894   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
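  // For example, an i32 value X == (1 << P) converts to an f64 with biased
  // exponent 1023 + P; subtracting that from 1023 + 31 gives 31 - P leading
  // zeros.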
2895   unsigned Adjust = ExponentBias + (EltSize - 1);
2896   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2897 }
2898 
// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly aligned, it returns SDValue().
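// For example, an underaligned v4i32 load is re-expressed as a v16i8 load of
// the same address, followed by a bitcast back to v4i32.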
2903 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2904                                                     SelectionDAG &DAG) const {
2905   auto *Load = cast<LoadSDNode>(Op);
2906   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2907 
2908   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2909                                      Load->getMemoryVT(),
2910                                      *Load->getMemOperand()))
2911     return SDValue();
2912 
2913   SDLoc DL(Op);
2914   MVT VT = Op.getSimpleValueType();
2915   unsigned EltSizeBits = VT.getScalarSizeInBits();
2916   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2917          "Unexpected unaligned RVV load type");
2918   MVT NewVT =
2919       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2920   assert(NewVT.isValid() &&
2921          "Expecting equally-sized RVV vector types to be legal");
2922   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2923                           Load->getPointerInfo(), Load->getOriginalAlign(),
2924                           Load->getMemOperand()->getFlags());
2925   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2926 }
2927 
// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
2932 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2933                                                      SelectionDAG &DAG) const {
2934   auto *Store = cast<StoreSDNode>(Op);
2935   assert(Store && Store->getValue().getValueType().isVector() &&
2936          "Expected vector store");
2937 
2938   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2939                                      Store->getMemoryVT(),
2940                                      *Store->getMemOperand()))
2941     return SDValue();
2942 
2943   SDLoc DL(Op);
2944   SDValue StoredVal = Store->getValue();
2945   MVT VT = StoredVal.getSimpleValueType();
2946   unsigned EltSizeBits = VT.getScalarSizeInBits();
2947   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2948          "Unexpected unaligned RVV store type");
2949   MVT NewVT =
2950       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2951   assert(NewVT.isValid() &&
2952          "Expecting equally-sized RVV vector types to be legal");
2953   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2954   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2955                       Store->getPointerInfo(), Store->getOriginalAlign(),
2956                       Store->getMemOperand()->getFlags());
2957 }
2958 
2959 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2960                                             SelectionDAG &DAG) const {
2961   switch (Op.getOpcode()) {
2962   default:
2963     report_fatal_error("unimplemented operand");
2964   case ISD::GlobalAddress:
2965     return lowerGlobalAddress(Op, DAG);
2966   case ISD::BlockAddress:
2967     return lowerBlockAddress(Op, DAG);
2968   case ISD::ConstantPool:
2969     return lowerConstantPool(Op, DAG);
2970   case ISD::JumpTable:
2971     return lowerJumpTable(Op, DAG);
2972   case ISD::GlobalTLSAddress:
2973     return lowerGlobalTLSAddress(Op, DAG);
2974   case ISD::SELECT:
2975     return lowerSELECT(Op, DAG);
2976   case ISD::BRCOND:
2977     return lowerBRCOND(Op, DAG);
2978   case ISD::VASTART:
2979     return lowerVASTART(Op, DAG);
2980   case ISD::FRAMEADDR:
2981     return lowerFRAMEADDR(Op, DAG);
2982   case ISD::RETURNADDR:
2983     return lowerRETURNADDR(Op, DAG);
2984   case ISD::SHL_PARTS:
2985     return lowerShiftLeftParts(Op, DAG);
2986   case ISD::SRA_PARTS:
2987     return lowerShiftRightParts(Op, DAG, true);
2988   case ISD::SRL_PARTS:
2989     return lowerShiftRightParts(Op, DAG, false);
2990   case ISD::BITCAST: {
2991     SDLoc DL(Op);
2992     EVT VT = Op.getValueType();
2993     SDValue Op0 = Op.getOperand(0);
2994     EVT Op0VT = Op0.getValueType();
2995     MVT XLenVT = Subtarget.getXLenVT();
2996     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2997       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2998       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2999       return FPConv;
3000     }
3001     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3002         Subtarget.hasStdExtF()) {
3003       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3004       SDValue FPConv =
3005           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3006       return FPConv;
3007     }
3008 
3009     // Consider other scalar<->scalar casts as legal if the types are legal.
3010     // Otherwise expand them.
3011     if (!VT.isVector() && !Op0VT.isVector()) {
3012       if (isTypeLegal(VT) && isTypeLegal(Op0VT))
3013         return Op;
3014       return SDValue();
3015     }
3016 
3017     assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
3018            "Unexpected types");
3019 
3020     if (VT.isFixedLengthVector()) {
3021       // We can handle fixed length vector bitcasts with a simple replacement
3022       // in isel.
3023       if (Op0VT.isFixedLengthVector())
3024         return Op;
3025       // When bitcasting from scalar to fixed-length vector, insert the scalar
3026       // into a one-element vector of the result type, and perform a vector
3027       // bitcast.
3028       if (!Op0VT.isVector()) {
3029         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3030         if (!isTypeLegal(BVT))
3031           return SDValue();
3032         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3033                                               DAG.getUNDEF(BVT), Op0,
3034                                               DAG.getConstant(0, DL, XLenVT)));
3035       }
3036       return SDValue();
3037     }
3038     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3039     // thus: bitcast the vector to a one-element vector type whose element type
3040     // is the same as the result type, and extract the first element.
3041     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3042       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3043       if (!isTypeLegal(BVT))
3044         return SDValue();
3045       SDValue BVec = DAG.getBitcast(BVT, Op0);
3046       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3047                          DAG.getConstant(0, DL, XLenVT));
3048     }
3049     return SDValue();
3050   }
3051   case ISD::INTRINSIC_WO_CHAIN:
3052     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3053   case ISD::INTRINSIC_W_CHAIN:
3054     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3055   case ISD::INTRINSIC_VOID:
3056     return LowerINTRINSIC_VOID(Op, DAG);
3057   case ISD::BSWAP:
3058   case ISD::BITREVERSE: {
3059     MVT VT = Op.getSimpleValueType();
3060     SDLoc DL(Op);
3061     if (Subtarget.hasStdExtZbp()) {
      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
3063       // Start with the maximum immediate value which is the bitwidth - 1.
3064       unsigned Imm = VT.getSizeInBits() - 1;
3065       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3066       if (Op.getOpcode() == ISD::BSWAP)
3067         Imm &= ~0x7U;
3068       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3069                          DAG.getConstant(Imm, DL, VT));
3070     }
3071     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3072     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3073     // Expand bitreverse to a bswap(rev8) followed by brev8.
3074     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3075     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3076     // as brev8 by an isel pattern.
3077     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3078                        DAG.getConstant(7, DL, VT));
3079   }
3080   case ISD::FSHL:
3081   case ISD::FSHR: {
3082     MVT VT = Op.getSimpleValueType();
3083     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3084     SDLoc DL(Op);
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly to prevent
    // accidentally setting the extra bit.
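    // For example, on RV64 the FSHL/FSHR amount is taken modulo 64, so masking
    // with 63 keeps bit 6 of the 7-bit FSL/FSR shift amount clear.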
3088     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3089     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3090                                 DAG.getConstant(ShAmtWidth, DL, VT));
    // fshl and fshr concatenate their operands in the same order. fsr and fsl
    // instructions use different orders. fshl will return its first operand
    // for a shift of zero, fshr will return its second operand. fsl and fsr
    // both return rs1, so the ISD nodes need to have different operand orders.
    // The shift amount is in rs2.
3096     SDValue Op0 = Op.getOperand(0);
3097     SDValue Op1 = Op.getOperand(1);
3098     unsigned Opc = RISCVISD::FSL;
3099     if (Op.getOpcode() == ISD::FSHR) {
3100       std::swap(Op0, Op1);
3101       Opc = RISCVISD::FSR;
3102     }
3103     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3104   }
3105   case ISD::TRUNCATE:
3106     // Only custom-lower vector truncates
3107     if (!Op.getSimpleValueType().isVector())
3108       return Op;
3109     return lowerVectorTruncLike(Op, DAG);
3110   case ISD::ANY_EXTEND:
3111   case ISD::ZERO_EXTEND:
3112     if (Op.getOperand(0).getValueType().isVector() &&
3113         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3114       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3115     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3116   case ISD::SIGN_EXTEND:
3117     if (Op.getOperand(0).getValueType().isVector() &&
3118         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3119       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3120     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3121   case ISD::SPLAT_VECTOR_PARTS:
3122     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3123   case ISD::INSERT_VECTOR_ELT:
3124     return lowerINSERT_VECTOR_ELT(Op, DAG);
3125   case ISD::EXTRACT_VECTOR_ELT:
3126     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3127   case ISD::VSCALE: {
3128     MVT VT = Op.getSimpleValueType();
3129     SDLoc DL(Op);
3130     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64-bit known
    // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we
    // calculate vscale as VLENB / 8.
3134     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3135     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3136       report_fatal_error("Support for VLEN==32 is incomplete.");
3137     // We assume VLENB is a multiple of 8. We manually choose the best shift
3138     // here because SimplifyDemandedBits isn't always able to simplify it.
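    // For example, since vscale == VLENB / 8, (vscale x 4) == VLENB >> 1 and
    // (vscale x 16) == VLENB << 1, each needing only one shift of VLENB.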
3139     uint64_t Val = Op.getConstantOperandVal(0);
3140     if (isPowerOf2_64(Val)) {
3141       uint64_t Log2 = Log2_64(Val);
3142       if (Log2 < 3)
3143         return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3144                            DAG.getConstant(3 - Log2, DL, VT));
3145       if (Log2 > 3)
3146         return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3147                            DAG.getConstant(Log2 - 3, DL, VT));
3148       return VLENB;
3149     }
3150     // If the multiplier is a multiple of 8, scale it down to avoid needing
3151     // to shift the VLENB value.
3152     if ((Val % 8) == 0)
3153       return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3154                          DAG.getConstant(Val / 8, DL, VT));
3155 
3156     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3157                                  DAG.getConstant(3, DL, VT));
3158     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3159   }
3160   case ISD::FPOWI: {
    // Custom promote f16 powi with illegal i32 integer type on RV64. Once
    // promoted, this will be legalized into a libcall by LegalizeIntegerTypes.
3163     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3164         Op.getOperand(1).getValueType() == MVT::i32) {
3165       SDLoc DL(Op);
3166       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3167       SDValue Powi =
3168           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3169       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3170                          DAG.getIntPtrConstant(0, DL));
3171     }
3172     return SDValue();
3173   }
3174   case ISD::FP_EXTEND:
3175   case ISD::FP_ROUND:
3176     if (!Op.getValueType().isVector())
3177       return Op;
3178     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3179   case ISD::FP_TO_SINT:
3180   case ISD::FP_TO_UINT:
3181   case ISD::SINT_TO_FP:
3182   case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversion that needs two hops into a
    // sequence of single hops.
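    // For example, i8 -> f32 is lowered as an i8 -> i32 sign/zero extension
    // followed by an i32 -> f32 conversion, and f64 -> i8 as an f64 -> i32
    // fp_to_int followed by a truncate.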
3186     MVT VT = Op.getSimpleValueType();
3187     if (!VT.isVector())
3188       return Op;
3189     SDLoc DL(Op);
3190     SDValue Src = Op.getOperand(0);
3191     MVT EltVT = VT.getVectorElementType();
3192     MVT SrcVT = Src.getSimpleValueType();
3193     MVT SrcEltVT = SrcVT.getVectorElementType();
3194     unsigned EltSize = EltVT.getSizeInBits();
3195     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3196     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3197            "Unexpected vector element types");
3198 
3199     bool IsInt2FP = SrcEltVT.isInteger();
3200     // Widening conversions
3201     if (EltSize > (2 * SrcEltSize)) {
3202       if (IsInt2FP) {
3203         // Do a regular integer sign/zero extension then convert to float.
3204         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3205                                       VT.getVectorElementCount());
3206         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3207                                  ? ISD::ZERO_EXTEND
3208                                  : ISD::SIGN_EXTEND;
3209         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3210         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3211       }
3212       // FP2Int
3213       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3214       // Do one doubling fp_extend then complete the operation by converting
3215       // to int.
3216       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3217       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3218       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3219     }
3220 
3221     // Narrowing conversions
3222     if (SrcEltSize > (2 * EltSize)) {
3223       if (IsInt2FP) {
3224         // One narrowing int_to_fp, then an fp_round.
3225         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3226         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3227         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3228         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3229       }
3230       // FP2Int
3231       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3232       // representable by the integer, the result is poison.
3233       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3234                                     VT.getVectorElementCount());
3235       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3236       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3237     }
3238 
    // Scalable vectors can exit here. Patterns will handle the remaining
    // conversions: equally-sized ones as well as those that halve or double
    // the element size.
3241     if (!VT.isFixedLengthVector())
3242       return Op;
3243 
3244     // For fixed-length vectors we lower to a custom "VL" node.
3245     unsigned RVVOpc = 0;
3246     switch (Op.getOpcode()) {
3247     default:
3248       llvm_unreachable("Impossible opcode");
3249     case ISD::FP_TO_SINT:
3250       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3251       break;
3252     case ISD::FP_TO_UINT:
3253       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3254       break;
3255     case ISD::SINT_TO_FP:
3256       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3257       break;
3258     case ISD::UINT_TO_FP:
3259       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3260       break;
3261     }
3262 
3263     MVT ContainerVT, SrcContainerVT;
3264     // Derive the reference container type from the larger vector type.
3265     if (SrcEltSize > EltSize) {
3266       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3267       ContainerVT =
3268           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3269     } else {
3270       ContainerVT = getContainerForFixedLengthVector(VT);
3271       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3272     }
3273 
3274     SDValue Mask, VL;
3275     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3276 
3277     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3278     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3279     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3280   }
3281   case ISD::FP_TO_SINT_SAT:
3282   case ISD::FP_TO_UINT_SAT:
3283     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3284   case ISD::FTRUNC:
3285   case ISD::FCEIL:
3286   case ISD::FFLOOR:
3287     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3288   case ISD::FROUND:
3289     return lowerFROUND(Op, DAG);
3290   case ISD::VECREDUCE_ADD:
3291   case ISD::VECREDUCE_UMAX:
3292   case ISD::VECREDUCE_SMAX:
3293   case ISD::VECREDUCE_UMIN:
3294   case ISD::VECREDUCE_SMIN:
3295     return lowerVECREDUCE(Op, DAG);
3296   case ISD::VECREDUCE_AND:
3297   case ISD::VECREDUCE_OR:
3298   case ISD::VECREDUCE_XOR:
3299     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3300       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3301     return lowerVECREDUCE(Op, DAG);
3302   case ISD::VECREDUCE_FADD:
3303   case ISD::VECREDUCE_SEQ_FADD:
3304   case ISD::VECREDUCE_FMIN:
3305   case ISD::VECREDUCE_FMAX:
3306     return lowerFPVECREDUCE(Op, DAG);
3307   case ISD::VP_REDUCE_ADD:
3308   case ISD::VP_REDUCE_UMAX:
3309   case ISD::VP_REDUCE_SMAX:
3310   case ISD::VP_REDUCE_UMIN:
3311   case ISD::VP_REDUCE_SMIN:
3312   case ISD::VP_REDUCE_FADD:
3313   case ISD::VP_REDUCE_SEQ_FADD:
3314   case ISD::VP_REDUCE_FMIN:
3315   case ISD::VP_REDUCE_FMAX:
3316     return lowerVPREDUCE(Op, DAG);
3317   case ISD::VP_REDUCE_AND:
3318   case ISD::VP_REDUCE_OR:
3319   case ISD::VP_REDUCE_XOR:
3320     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3321       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3322     return lowerVPREDUCE(Op, DAG);
3323   case ISD::INSERT_SUBVECTOR:
3324     return lowerINSERT_SUBVECTOR(Op, DAG);
3325   case ISD::EXTRACT_SUBVECTOR:
3326     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3327   case ISD::STEP_VECTOR:
3328     return lowerSTEP_VECTOR(Op, DAG);
3329   case ISD::VECTOR_REVERSE:
3330     return lowerVECTOR_REVERSE(Op, DAG);
3331   case ISD::VECTOR_SPLICE:
3332     return lowerVECTOR_SPLICE(Op, DAG);
3333   case ISD::BUILD_VECTOR:
3334     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3335   case ISD::SPLAT_VECTOR:
3336     if (Op.getValueType().getVectorElementType() == MVT::i1)
3337       return lowerVectorMaskSplat(Op, DAG);
3338     return SDValue();
3339   case ISD::VECTOR_SHUFFLE:
3340     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3341   case ISD::CONCAT_VECTORS: {
    // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
    // better than the default expansion, which goes through the stack.
3344     SDLoc DL(Op);
3345     MVT VT = Op.getSimpleValueType();
3346     unsigned NumOpElts =
3347         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3348     SDValue Vec = DAG.getUNDEF(VT);
3349     for (const auto &OpIdx : enumerate(Op->ops())) {
3350       SDValue SubVec = OpIdx.value();
3351       // Don't insert undef subvectors.
3352       if (SubVec.isUndef())
3353         continue;
3354       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3355                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3356     }
3357     return Vec;
3358   }
3359   case ISD::LOAD:
3360     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3361       return V;
3362     if (Op.getValueType().isFixedLengthVector())
3363       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3364     return Op;
3365   case ISD::STORE:
3366     if (auto V = expandUnalignedRVVStore(Op, DAG))
3367       return V;
3368     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3369       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3370     return Op;
3371   case ISD::MLOAD:
3372   case ISD::VP_LOAD:
3373     return lowerMaskedLoad(Op, DAG);
3374   case ISD::MSTORE:
3375   case ISD::VP_STORE:
3376     return lowerMaskedStore(Op, DAG);
3377   case ISD::SETCC:
3378     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3379   case ISD::ADD:
3380     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3381   case ISD::SUB:
3382     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3383   case ISD::MUL:
3384     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3385   case ISD::MULHS:
3386     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3387   case ISD::MULHU:
3388     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3389   case ISD::AND:
3390     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3391                                               RISCVISD::AND_VL);
3392   case ISD::OR:
3393     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3394                                               RISCVISD::OR_VL);
3395   case ISD::XOR:
3396     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3397                                               RISCVISD::XOR_VL);
3398   case ISD::SDIV:
3399     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3400   case ISD::SREM:
3401     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3402   case ISD::UDIV:
3403     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3404   case ISD::UREM:
3405     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3406   case ISD::SHL:
3407   case ISD::SRA:
3408   case ISD::SRL:
3409     if (Op.getSimpleValueType().isFixedLengthVector())
3410       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3411     // This can be called for an i32 shift amount that needs to be promoted.
3412     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalization");
3414     return SDValue();
3415   case ISD::SADDSAT:
3416     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3417   case ISD::UADDSAT:
3418     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3419   case ISD::SSUBSAT:
3420     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3421   case ISD::USUBSAT:
3422     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3423   case ISD::FADD:
3424     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3425   case ISD::FSUB:
3426     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3427   case ISD::FMUL:
3428     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3429   case ISD::FDIV:
3430     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3431   case ISD::FNEG:
3432     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3433   case ISD::FABS:
3434     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3435   case ISD::FSQRT:
3436     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3437   case ISD::FMA:
3438     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3439   case ISD::SMIN:
3440     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3441   case ISD::SMAX:
3442     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3443   case ISD::UMIN:
3444     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3445   case ISD::UMAX:
3446     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3447   case ISD::FMINNUM:
3448     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3449   case ISD::FMAXNUM:
3450     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3451   case ISD::ABS:
3452     return lowerABS(Op, DAG);
3453   case ISD::CTLZ_ZERO_UNDEF:
3454   case ISD::CTTZ_ZERO_UNDEF:
3455     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3456   case ISD::VSELECT:
3457     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3458   case ISD::FCOPYSIGN:
3459     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3460   case ISD::MGATHER:
3461   case ISD::VP_GATHER:
3462     return lowerMaskedGather(Op, DAG);
3463   case ISD::MSCATTER:
3464   case ISD::VP_SCATTER:
3465     return lowerMaskedScatter(Op, DAG);
3466   case ISD::FLT_ROUNDS_:
3467     return lowerGET_ROUNDING(Op, DAG);
3468   case ISD::SET_ROUNDING:
3469     return lowerSET_ROUNDING(Op, DAG);
3470   case ISD::EH_DWARF_CFA:
3471     return lowerEH_DWARF_CFA(Op, DAG);
3472   case ISD::VP_SELECT:
3473     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3474   case ISD::VP_MERGE:
3475     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3476   case ISD::VP_ADD:
3477     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3478   case ISD::VP_SUB:
3479     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3480   case ISD::VP_MUL:
3481     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3482   case ISD::VP_SDIV:
3483     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3484   case ISD::VP_UDIV:
3485     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3486   case ISD::VP_SREM:
3487     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3488   case ISD::VP_UREM:
3489     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3490   case ISD::VP_AND:
3491     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3492   case ISD::VP_OR:
3493     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3494   case ISD::VP_XOR:
3495     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3496   case ISD::VP_ASHR:
3497     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3498   case ISD::VP_LSHR:
3499     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3500   case ISD::VP_SHL:
3501     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3502   case ISD::VP_FADD:
3503     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3504   case ISD::VP_FSUB:
3505     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3506   case ISD::VP_FMUL:
3507     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3508   case ISD::VP_FDIV:
3509     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3510   case ISD::VP_FNEG:
3511     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3512   case ISD::VP_FMA:
3513     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3514   case ISD::VP_SIGN_EXTEND:
3515   case ISD::VP_ZERO_EXTEND:
3516     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3517       return lowerVPExtMaskOp(Op, DAG);
3518     return lowerVPOp(Op, DAG,
3519                      Op.getOpcode() == ISD::VP_SIGN_EXTEND
3520                          ? RISCVISD::VSEXT_VL
3521                          : RISCVISD::VZEXT_VL);
3522   case ISD::VP_TRUNCATE:
3523     return lowerVectorTruncLike(Op, DAG);
3524   case ISD::VP_FP_EXTEND:
3525   case ISD::VP_FP_ROUND:
3526     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3527   case ISD::VP_FPTOSI:
3528     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3529   case ISD::VP_FPTOUI:
3530     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3531   case ISD::VP_SITOFP:
3532     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3533   case ISD::VP_UITOFP:
3534     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3535   case ISD::VP_SETCC:
3536     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3537       return lowerVPSetCCMaskOp(Op, DAG);
3538     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3539   }
3540 }
3541 
3542 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3543                              SelectionDAG &DAG, unsigned Flags) {
3544   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3545 }
3546 
3547 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3548                              SelectionDAG &DAG, unsigned Flags) {
3549   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3550                                    Flags);
3551 }
3552 
3553 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3554                              SelectionDAG &DAG, unsigned Flags) {
3555   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3556                                    N->getOffset(), Flags);
3557 }
3558 
3559 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3560                              SelectionDAG &DAG, unsigned Flags) {
3561   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3562 }
3563 
3564 template <class NodeTy>
3565 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3566                                      bool IsLocal) const {
3567   SDLoc DL(N);
3568   EVT Ty = getPointerTy(DAG.getDataLayout());
3569 
3570   if (isPositionIndependent()) {
3571     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3572     if (IsLocal)
3573       // Use PC-relative addressing to access the symbol. This generates the
3574       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3575       // %pcrel_lo(auipc)).
3576       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3577 
3578     // Use PC-relative addressing to access the GOT for this symbol, then load
3579     // the address from the GOT. This generates the pattern (PseudoLA sym),
3580     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3581     SDValue Load =
3582         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3583     MachineFunction &MF = DAG.getMachineFunction();
3584     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3585         MachinePointerInfo::getGOT(MF),
3586         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3587             MachineMemOperand::MOInvariant,
3588         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3589     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3590     return Load;
3591   }
3592 
3593   switch (getTargetMachine().getCodeModel()) {
3594   default:
3595     report_fatal_error("Unsupported code model for lowering");
3596   case CodeModel::Small: {
3597     // Generate a sequence for accessing addresses within the first 2 GiB of
3598     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
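    // In assembly this materializes as:
    //   lui  rd, %hi(sym)
    //   addi rd, rd, %lo(sym)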
3599     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3600     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3601     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3602     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3603   }
3604   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range within
3606     // the address space. This generates the pattern (PseudoLLA sym), which
3607     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
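    // In assembly this materializes roughly as:
    //   .Lpcrel_hi0: auipc rd, %pcrel_hi(sym)
    //                addi  rd, rd, %pcrel_lo(.Lpcrel_hi0)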
3608     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3609     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3610   }
3611   }
3612 }
3613 
3614 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3615     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3616 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3617     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3618 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3619     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3620 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3621     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3622 
3623 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3624                                                 SelectionDAG &DAG) const {
3625   SDLoc DL(Op);
3626   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3627   assert(N->getOffset() == 0 && "unexpected offset in global node");
3628 
3629   const GlobalValue *GV = N->getGlobal();
3630   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3631   return getAddr(N, DAG, IsLocal);
3632 }
3633 
3634 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3635                                                SelectionDAG &DAG) const {
3636   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3637 
3638   return getAddr(N, DAG);
3639 }
3640 
3641 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3642                                                SelectionDAG &DAG) const {
3643   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3644 
3645   return getAddr(N, DAG);
3646 }
3647 
3648 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3649                                             SelectionDAG &DAG) const {
3650   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3651 
3652   return getAddr(N, DAG);
3653 }
3654 
3655 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3656                                               SelectionDAG &DAG,
3657                                               bool UseGOT) const {
3658   SDLoc DL(N);
3659   EVT Ty = getPointerTy(DAG.getDataLayout());
3660   const GlobalValue *GV = N->getGlobal();
3661   MVT XLenVT = Subtarget.getXLenVT();
3662 
3663   if (UseGOT) {
3664     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3665     // load the address from the GOT and add the thread pointer. This generates
3666     // the pattern (PseudoLA_TLS_IE sym), which expands to
3667     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3668     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3669     SDValue Load =
3670         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3671     MachineFunction &MF = DAG.getMachineFunction();
3672     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3673         MachinePointerInfo::getGOT(MF),
3674         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3675             MachineMemOperand::MOInvariant,
3676         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3677     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3678 
3679     // Add the thread pointer.
3680     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3681     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3682   }
3683 
3684   // Generate a sequence for accessing the address relative to the thread
3685   // pointer, with the appropriate adjustment for the thread pointer offset.
3686   // This generates the pattern
3687   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
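  // In assembly this materializes as:
  //   lui  rd, %tprel_hi(sym)
  //   add  rd, rd, tp, %tprel_add(sym)
  //   addi rd, rd, %tprel_lo(sym)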
3688   SDValue AddrHi =
3689       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3690   SDValue AddrAdd =
3691       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3692   SDValue AddrLo =
3693       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3694 
3695   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3696   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3697   SDValue MNAdd = SDValue(
3698       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3699       0);
3700   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3701 }
3702 
3703 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3704                                                SelectionDAG &DAG) const {
3705   SDLoc DL(N);
3706   EVT Ty = getPointerTy(DAG.getDataLayout());
3707   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3708   const GlobalValue *GV = N->getGlobal();
3709 
3710   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3711   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3712   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3713   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3714   SDValue Load =
3715       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3716 
3717   // Prepare argument list to generate call.
3718   ArgListTy Args;
3719   ArgListEntry Entry;
3720   Entry.Node = Load;
3721   Entry.Ty = CallTy;
3722   Args.push_back(Entry);
3723 
3724   // Setup call to __tls_get_addr.
3725   TargetLowering::CallLoweringInfo CLI(DAG);
3726   CLI.setDebugLoc(DL)
3727       .setChain(DAG.getEntryNode())
3728       .setLibCallee(CallingConv::C, CallTy,
3729                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3730                     std::move(Args));
3731 
3732   return LowerCallTo(CLI).first;
3733 }
3734 
3735 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3736                                                    SelectionDAG &DAG) const {
3737   SDLoc DL(Op);
3738   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3739   assert(N->getOffset() == 0 && "unexpected offset in global node");
3740 
3741   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3742 
3743   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3744       CallingConv::GHC)
3745     report_fatal_error("In GHC calling convention TLS is not supported");
3746 
3747   SDValue Addr;
3748   switch (Model) {
3749   case TLSModel::LocalExec:
3750     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3751     break;
3752   case TLSModel::InitialExec:
3753     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3754     break;
3755   case TLSModel::LocalDynamic:
3756   case TLSModel::GeneralDynamic:
3757     Addr = getDynamicTLSAddr(N, DAG);
3758     break;
3759   }
3760 
3761   return Addr;
3762 }
3763 
3764 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3765   SDValue CondV = Op.getOperand(0);
3766   SDValue TrueV = Op.getOperand(1);
3767   SDValue FalseV = Op.getOperand(2);
3768   SDLoc DL(Op);
3769   MVT VT = Op.getSimpleValueType();
3770   MVT XLenVT = Subtarget.getXLenVT();
3771 
3772   // Lower vector SELECTs to VSELECTs by splatting the condition.
3773   if (VT.isVector()) {
3774     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3775     SDValue CondSplat = VT.isScalableVector()
3776                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3777                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3778     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3779   }
3780 
3781   // If the result type is XLenVT and CondV is the output of a SETCC node
3782   // which also operated on XLenVT inputs, then merge the SETCC node into the
3783   // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions, i.e.:
3785   // (select (setcc lhs, rhs, cc), truev, falsev)
3786   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3787   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3788       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3789     SDValue LHS = CondV.getOperand(0);
3790     SDValue RHS = CondV.getOperand(1);
3791     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3792     ISD::CondCode CCVal = CC->get();
3793 
    // Special case for a select of 2 constants that have a difference of 1.
3795     // Normally this is done by DAGCombine, but if the select is introduced by
3796     // type legalization or op legalization, we miss it. Restricting to SETLT
3797     // case for now because that is what signed saturating add/sub need.
3798     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3799     // but we would probably want to swap the true/false values if the condition
3800     // is SETGE/SETLE to avoid an XORI.
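    // For example, (select (setlt a, b), 6, 5) becomes
    // (add (setlt a, b), 5): the setcc produces 0 or 1, so adding it to the
    // false value yields 5 or 6 without a branch.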
3801     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3802         CCVal == ISD::SETLT) {
3803       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3804       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3805       if (TrueVal - 1 == FalseVal)
3806         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3807       if (TrueVal + 1 == FalseVal)
3808         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3809     }
3810 
3811     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3812 
3813     SDValue TargetCC = DAG.getCondCode(CCVal);
3814     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3815     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3816   }
3817 
3818   // Otherwise:
3819   // (select condv, truev, falsev)
3820   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3821   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3822   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3823 
3824   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3825 
3826   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3827 }
3828 
3829 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3830   SDValue CondV = Op.getOperand(1);
3831   SDLoc DL(Op);
3832   MVT XLenVT = Subtarget.getXLenVT();
3833 
3834   if (CondV.getOpcode() == ISD::SETCC &&
3835       CondV.getOperand(0).getValueType() == XLenVT) {
3836     SDValue LHS = CondV.getOperand(0);
3837     SDValue RHS = CondV.getOperand(1);
3838     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3839 
3840     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3841 
3842     SDValue TargetCC = DAG.getCondCode(CCVal);
3843     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3844                        LHS, RHS, TargetCC, Op.getOperand(2));
3845   }
3846 
3847   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3848                      CondV, DAG.getConstant(0, DL, XLenVT),
3849                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3850 }
3851 
3852 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3853   MachineFunction &MF = DAG.getMachineFunction();
3854   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3855 
3856   SDLoc DL(Op);
3857   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3858                                  getPointerTy(MF.getDataLayout()));
3859 
3860   // vastart just stores the address of the VarArgsFrameIndex slot into the
3861   // memory location argument.
3862   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3863   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3864                       MachinePointerInfo(SV));
3865 }
3866 
3867 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3868                                             SelectionDAG &DAG) const {
3869   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3870   MachineFunction &MF = DAG.getMachineFunction();
3871   MachineFrameInfo &MFI = MF.getFrameInfo();
3872   MFI.setFrameAddressIsTaken(true);
3873   Register FrameReg = RI.getFrameRegister(MF);
3874   int XLenInBytes = Subtarget.getXLen() / 8;
3875 
3876   EVT VT = Op.getValueType();
3877   SDLoc DL(Op);
3878   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3879   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
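  // With a frame pointer, the standard prologue stores the caller's frame
  // pointer at fp - 2*XLenInBytes (the return address is at fp - XLenInBytes),
  // so each enclosing frame is reached with a load at that fixed offset.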
3880   while (Depth--) {
3881     int Offset = -(XLenInBytes * 2);
3882     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3883                               DAG.getIntPtrConstant(Offset, DL));
3884     FrameAddr =
3885         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3886   }
3887   return FrameAddr;
3888 }
3889 
3890 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3891                                              SelectionDAG &DAG) const {
3892   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3893   MachineFunction &MF = DAG.getMachineFunction();
3894   MachineFrameInfo &MFI = MF.getFrameInfo();
3895   MFI.setReturnAddressIsTaken(true);
3896   MVT XLenVT = Subtarget.getXLenVT();
3897   int XLenInBytes = Subtarget.getXLen() / 8;
3898 
3899   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3900     return SDValue();
3901 
3902   EVT VT = Op.getValueType();
3903   SDLoc DL(Op);
3904   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3905   if (Depth) {
3906     int Off = -XLenInBytes;
3907     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3908     SDValue Offset = DAG.getConstant(Off, DL, VT);
3909     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3910                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3911                        MachinePointerInfo());
3912   }
3913 
3914   // Return the value of the return address register, marking it an implicit
3915   // live-in.
3916   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3917   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3918 }
3919 
3920 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3921                                                  SelectionDAG &DAG) const {
3922   SDLoc DL(Op);
3923   SDValue Lo = Op.getOperand(0);
3924   SDValue Hi = Op.getOperand(1);
3925   SDValue Shamt = Op.getOperand(2);
3926   EVT VT = Lo.getValueType();
3927 
3928   // if Shamt-XLEN < 0: // Shamt < XLEN
3929   //   Lo = Lo << Shamt
3930   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3931   // else:
3932   //   Lo = 0
3933   //   Hi = Lo << (Shamt-XLEN)
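  //
  // Note: (XLEN-1 ^ Shamt) equals XLEN-1-Shamt for Shamt in [0, XLEN), so
  // combined with the fixed >>u 1 the total right shift is XLEN-Shamt, while
  // every individual shift amount stays below XLEN (a direct shift by
  // XLEN-Shamt would be out of range when Shamt is 0).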
3934 
3935   SDValue Zero = DAG.getConstant(0, DL, VT);
3936   SDValue One = DAG.getConstant(1, DL, VT);
3937   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3938   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3939   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3940   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3941 
3942   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3943   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3944   SDValue ShiftRightLo =
3945       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3946   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3947   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3948   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3949 
3950   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3951 
3952   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3953   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3954 
3955   SDValue Parts[2] = {Lo, Hi};
3956   return DAG.getMergeValues(Parts, DL);
3957 }
3958 
3959 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3960                                                   bool IsSRA) const {
3961   SDLoc DL(Op);
3962   SDValue Lo = Op.getOperand(0);
3963   SDValue Hi = Op.getOperand(1);
3964   SDValue Shamt = Op.getOperand(2);
3965   EVT VT = Lo.getValueType();
3966 
  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN)
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN)
  //     Hi = 0
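  //
  // As in lowerShiftLeftParts, (Shamt ^ XLEN-1) together with the fixed << 1
  // realizes the XLEN-Shamt shift while keeping every shift amount within
  // [0, XLEN).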
3982 
3983   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3984 
3985   SDValue Zero = DAG.getConstant(0, DL, VT);
3986   SDValue One = DAG.getConstant(1, DL, VT);
3987   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3988   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3989   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3990   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3991 
3992   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3993   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3994   SDValue ShiftLeftHi =
3995       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3996   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3997   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3998   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3999   SDValue HiFalse =
4000       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4001 
4002   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4003 
4004   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4005   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4006 
4007   SDValue Parts[2] = {Lo, Hi};
4008   return DAG.getMergeValues(Parts, DL);
4009 }
4010 
4011 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4012 // legal equivalently-sized i8 type, so we can use that as a go-between.
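// For example, a non-constant i1 splat of scalar c is emitted below as
//   (setcc (vXi8 splat (and c, 1)), (vXi8 splat 0), ne).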
4013 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4014                                                   SelectionDAG &DAG) const {
4015   SDLoc DL(Op);
4016   MVT VT = Op.getSimpleValueType();
4017   SDValue SplatVal = Op.getOperand(0);
4018   // All-zeros or all-ones splats are handled specially.
4019   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4020     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4021     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4022   }
4023   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4024     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4025     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4026   }
4027   MVT XLenVT = Subtarget.getXLenVT();
4028   assert(SplatVal.getValueType() == XLenVT &&
4029          "Unexpected type for i1 splat value");
4030   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4031   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4032                          DAG.getConstant(1, DL, XLenVT));
4033   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4034   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4035   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4036 }
4037 
4038 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4039 // illegal (currently only vXi64 RV32).
4040 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4041 // them to VMV_V_X_VL.
4042 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4043                                                      SelectionDAG &DAG) const {
4044   SDLoc DL(Op);
4045   MVT VecVT = Op.getSimpleValueType();
4046   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4047          "Unexpected SPLAT_VECTOR_PARTS lowering");
4048 
4049   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4050   SDValue Lo = Op.getOperand(0);
4051   SDValue Hi = Op.getOperand(1);
4052 
4053   if (VecVT.isFixedLengthVector()) {
4054     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4056     SDValue Mask, VL;
4057     std::tie(Mask, VL) =
4058         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4059 
4060     SDValue Res =
4061         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4062     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4063   }
4064 
4065   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4066     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4067     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign-extension of Lo (every bit equal
    // to Lo's sign bit), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
4070     if ((LoC >> 31) == HiC)
4071       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4072                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4073   }
4074 
4075   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4076   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4077       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4078       Hi.getConstantOperandVal(1) == 31)
4079     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4080                        DAG.getRegister(RISCV::X0, MVT::i32));
4081 
  // Fall back to a stack store and a strided vector load with stride x0.
  // Use X0 as the VL.
4083   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4084                      DAG.getUNDEF(VecVT), Lo, Hi,
4085                      DAG.getRegister(RISCV::X0, MVT::i32));
4086 }
4087 
4088 // Custom-lower extensions from mask vectors by using a vselect either with 1
4089 // for zero/any-extension or -1 for sign-extension:
4090 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4091 // Note that any-extension is lowered identically to zero-extension.
4092 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4093                                                 int64_t ExtTrueVal) const {
4094   SDLoc DL(Op);
4095   MVT VecVT = Op.getSimpleValueType();
4096   SDValue Src = Op.getOperand(0);
4097   // Only custom-lower extensions from mask types
4098   assert(Src.getValueType().isVector() &&
4099          Src.getValueType().getVectorElementType() == MVT::i1);
4100 
4101   if (VecVT.isScalableVector()) {
4102     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4103     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4104     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4105   }
4106 
4107   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4108   MVT I1ContainerVT =
4109       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4110 
4111   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4112 
4113   SDValue Mask, VL;
4114   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4115 
4116   MVT XLenVT = Subtarget.getXLenVT();
4117   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4118   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4119 
4120   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4121                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4122   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4123                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4124   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4125                                SplatTrueVal, SplatZero, VL);
4126 
4127   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4128 }
4129 
4130 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4131     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4132   MVT ExtVT = Op.getSimpleValueType();
4133   // Only custom-lower extensions from fixed-length vector types.
4134   if (!ExtVT.isFixedLengthVector())
4135     return Op;
4136   MVT VT = Op.getOperand(0).getSimpleValueType();
4137   // Grab the canonical container type for the extended type. Infer the smaller
4138   // type from that to ensure the same number of vector elements, as we know
4139   // the LMUL will be sufficient to hold the smaller type.
4140   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Build the source container type manually to ensure the same number of
  // vector elements between source and dest.
4143   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4144                                      ContainerExtVT.getVectorElementCount());
4145 
4146   SDValue Op1 =
4147       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4148 
4149   SDLoc DL(Op);
4150   SDValue Mask, VL;
4151   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4152 
4153   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4154 
4155   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4156 }
4157 
4158 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4159 // setcc operation:
4160 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4161 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4162                                                       SelectionDAG &DAG) const {
4163   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4164   SDLoc DL(Op);
4165   EVT MaskVT = Op.getValueType();
4166   // Only expect to custom-lower truncations to mask types
4167   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4168          "Unexpected type for vector mask lowering");
4169   SDValue Src = Op.getOperand(0);
4170   MVT VecVT = Src.getSimpleValueType();
4171   SDValue Mask, VL;
4172   if (IsVPTrunc) {
4173     Mask = Op.getOperand(1);
4174     VL = Op.getOperand(2);
4175   }
4176   // If this is a fixed vector, we need to convert it to a scalable vector.
4177   MVT ContainerVT = VecVT;
4178 
4179   if (VecVT.isFixedLengthVector()) {
4180     ContainerVT = getContainerForFixedLengthVector(VecVT);
4181     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4182     if (IsVPTrunc) {
4183       MVT MaskContainerVT =
4184           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4185       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4186     }
4187   }
4188 
4189   if (!IsVPTrunc) {
4190     std::tie(Mask, VL) =
4191         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4192   }
4193 
4194   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4195   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4196 
4197   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4198                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4199   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4200                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4201 
4202   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4203   SDValue Trunc =
4204       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4205   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4206                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4207   if (MaskVT.isFixedLengthVector())
4208     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4209   return Trunc;
4210 }
4211 
4212 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4213                                                   SelectionDAG &DAG) const {
4214   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4215   SDLoc DL(Op);
4216 
4217   MVT VT = Op.getSimpleValueType();
4218   // Only custom-lower vector truncates
4219   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4220 
4221   // Truncates to mask types are handled differently
4222   if (VT.getVectorElementType() == MVT::i1)
4223     return lowerVectorMaskTruncLike(Op, DAG);
4224 
4225   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4226   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4227   // truncate by one power of two at a time.
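  // For example, a vXi64->vXi8 truncate is emitted as three such nodes:
  // i64->i32, then i32->i16, then i16->i8.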
4228   MVT DstEltVT = VT.getVectorElementType();
4229 
4230   SDValue Src = Op.getOperand(0);
4231   MVT SrcVT = Src.getSimpleValueType();
4232   MVT SrcEltVT = SrcVT.getVectorElementType();
4233 
4234   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4235          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4236          "Unexpected vector truncate lowering");
4237 
4238   MVT ContainerVT = SrcVT;
4239   SDValue Mask, VL;
4240   if (IsVPTrunc) {
4241     Mask = Op.getOperand(1);
4242     VL = Op.getOperand(2);
4243   }
4244   if (SrcVT.isFixedLengthVector()) {
4245     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4246     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4247     if (IsVPTrunc) {
4248       MVT MaskVT = getMaskTypeFor(ContainerVT);
4249       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4250     }
4251   }
4252 
4253   SDValue Result = Src;
4254   if (!IsVPTrunc) {
4255     std::tie(Mask, VL) =
4256         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4257   }
4258 
4259   LLVMContext &Context = *DAG.getContext();
4260   const ElementCount Count = ContainerVT.getVectorElementCount();
4261   do {
4262     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4263     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4264     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4265                          Mask, VL);
4266   } while (SrcEltVT != DstEltVT);
4267 
4268   if (SrcVT.isFixedLengthVector())
4269     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4270 
4271   return Result;
4272 }
4273 
4274 SDValue
4275 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
4276                                                     SelectionDAG &DAG) const {
4277   bool IsVP =
4278       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
4279   bool IsExtend =
4280       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
  // RVV can only truncate fp values to types half the size of the source. We
  // custom-lower f64->f16 rounds via RVV's round-to-odd float
  // conversion instruction.
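  // For example, an f64->f16 round becomes a round-to-odd f64->f32 narrowing
  // conversion followed by an ordinary f32->f16 round; an f16->f64 extend
  // likewise goes through f32 in two FP_EXTEND_VL steps.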
4284   SDLoc DL(Op);
4285   MVT VT = Op.getSimpleValueType();
4286 
4287   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4288 
4289   SDValue Src = Op.getOperand(0);
4290   MVT SrcVT = Src.getSimpleValueType();
4291 
4292   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
4293                                      SrcVT.getVectorElementType() != MVT::f16);
4294   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
4295                                      SrcVT.getVectorElementType() != MVT::f64);
4296 
4297   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
4298 
4299   // Prepare any fixed-length vector operands.
4300   MVT ContainerVT = VT;
4301   SDValue Mask, VL;
4302   if (IsVP) {
4303     Mask = Op.getOperand(1);
4304     VL = Op.getOperand(2);
4305   }
4306   if (VT.isFixedLengthVector()) {
4307     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4308     ContainerVT =
4309         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4310     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4311     if (IsVP) {
4312       MVT MaskVT = getMaskTypeFor(ContainerVT);
4313       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4314     }
4315   }
4316 
4317   if (!IsVP)
4318     std::tie(Mask, VL) =
4319         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4320 
4321   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
4322 
4323   if (IsDirectConv) {
4324     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
4325     if (VT.isFixedLengthVector())
4326       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4327     return Src;
4328   }
4329 
4330   unsigned InterConvOpc =
4331       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
4332 
4333   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4334   SDValue IntermediateConv =
4335       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
4336   SDValue Result =
4337       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
4338   if (VT.isFixedLengthVector())
4339     return convertFromScalableVector(VT, Result, DAG, Subtarget);
4340   return Result;
4341 }
4342 
4343 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4344 // first position of a vector, and that vector is slid up to the insert index.
4345 // By limiting the active vector length to index+1 and merging with the
4346 // original vector (with an undisturbed tail policy for elements >= VL), we
4347 // achieve the desired result of leaving all elements untouched except the one
4348 // at VL-1, which is replaced with the desired value.
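// For example, inserting Val at index 2 of a vector Vec is conceptually
// (a sketch of the eventual instruction sequence):
//   vmv.s.x     vTmp, Val      ; place Val in element 0 of a temporary
//   vsetivli    zero, 3, ...   ; limit VL to index + 1
//   vslideup.vi vDst, vTmp, 2  ; slide into Vec, tail undisturbed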
4349 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4350                                                     SelectionDAG &DAG) const {
4351   SDLoc DL(Op);
4352   MVT VecVT = Op.getSimpleValueType();
4353   SDValue Vec = Op.getOperand(0);
4354   SDValue Val = Op.getOperand(1);
4355   SDValue Idx = Op.getOperand(2);
4356 
4357   if (VecVT.getVectorElementType() == MVT::i1) {
4358     // FIXME: For now we just promote to an i8 vector and insert into that,
4359     // but this is probably not optimal.
4360     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4361     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4362     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4363     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4364   }
4365 
4366   MVT ContainerVT = VecVT;
4367   // If the operand is a fixed-length vector, convert to a scalable one.
4368   if (VecVT.isFixedLengthVector()) {
4369     ContainerVT = getContainerForFixedLengthVector(VecVT);
4370     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4371   }
4372 
4373   MVT XLenVT = Subtarget.getXLenVT();
4374 
4375   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4376   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4377   // Even i64-element vectors on RV32 can be lowered without scalar
4378   // legalization if the most-significant 32 bits of the value are not affected
4379   // by the sign-extension of the lower 32 bits.
4380   // TODO: We could also catch sign extensions of a 32-bit value.
4381   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4382     const auto *CVal = cast<ConstantSDNode>(Val);
4383     if (isInt<32>(CVal->getSExtValue())) {
4384       IsLegalInsert = true;
4385       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4386     }
4387   }
4388 
4389   SDValue Mask, VL;
4390   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4391 
4392   SDValue ValInVec;
4393 
4394   if (IsLegalInsert) {
4395     unsigned Opc =
4396         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4397     if (isNullConstant(Idx)) {
4398       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4399       if (!VecVT.isFixedLengthVector())
4400         return Vec;
4401       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4402     }
4403     ValInVec =
4404         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4405   } else {
4406     // On RV32, i64-element vectors must be specially handled to place the
4407     // value at element 0, by using two vslide1up instructions in sequence on
4408     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4409     // this.
4410     SDValue One = DAG.getConstant(1, DL, XLenVT);
4411     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4412     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4413     MVT I32ContainerVT =
4414         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4415     SDValue I32Mask =
4416         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4417     // Limit the active VL to two.
4418     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
4420     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4421     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4422                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4423     // First slide in the hi value, then the lo in underneath it.
4424     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4425                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4426                            I32Mask, InsertI64VL);
4427     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4428                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4429                            I32Mask, InsertI64VL);
4430     // Bitcast back to the right container type.
4431     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4432   }
4433 
4434   // Now that the value is in a vector, slide it into position.
4435   SDValue InsertVL =
4436       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4437   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4438                                 ValInVec, Idx, Mask, InsertVL);
4439   if (!VecVT.isFixedLengthVector())
4440     return Slideup;
4441   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4442 }
4443 
4444 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4445 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4446 // types this is done using VMV_X_S to allow us to glean information about the
4447 // sign bits of the result.
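// For example, (extractelt vXi32:v, 3) is conceptually:
//   vsetivli      zero, 1, ...   ; VL = 1, only element 0 is needed
//   vslidedown.vi vTmp, v, 3
//   vmv.x.s       rd, vTmp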
4448 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4449                                                      SelectionDAG &DAG) const {
4450   SDLoc DL(Op);
4451   SDValue Idx = Op.getOperand(1);
4452   SDValue Vec = Op.getOperand(0);
4453   EVT EltVT = Op.getValueType();
4454   MVT VecVT = Vec.getSimpleValueType();
4455   MVT XLenVT = Subtarget.getXLenVT();
4456 
4457   if (VecVT.getVectorElementType() == MVT::i1) {
4458     if (VecVT.isFixedLengthVector()) {
4459       unsigned NumElts = VecVT.getVectorNumElements();
4460       if (NumElts >= 8) {
4461         MVT WideEltVT;
4462         unsigned WidenVecLen;
4463         SDValue ExtractElementIdx;
4464         SDValue ExtractBitIdx;
4465         unsigned MaxEEW = Subtarget.getELEN();
4466         MVT LargestEltVT = MVT::getIntegerVT(
4467             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4468         if (NumElts <= LargestEltVT.getSizeInBits()) {
4469           assert(isPowerOf2_32(NumElts) &&
4470                  "the number of elements should be power of 2");
4471           WideEltVT = MVT::getIntegerVT(NumElts);
4472           WidenVecLen = 1;
4473           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4474           ExtractBitIdx = Idx;
4475         } else {
4476           WideEltVT = LargestEltVT;
4477           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4478           // extract element index = index / element width
4479           ExtractElementIdx = DAG.getNode(
4480               ISD::SRL, DL, XLenVT, Idx,
4481               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4482           // mask bit index = index % element width
4483           ExtractBitIdx = DAG.getNode(
4484               ISD::AND, DL, XLenVT, Idx,
4485               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4486         }
4487         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4488         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4489         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4490                                          Vec, ExtractElementIdx);
4491         // Extract the bit from GPR.
4492         SDValue ShiftRight =
4493             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4494         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4495                            DAG.getConstant(1, DL, XLenVT));
4496       }
4497     }
4498     // Otherwise, promote to an i8 vector and extract from that.
4499     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4500     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4501     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4502   }
4503 
4504   // If this is a fixed vector, we need to convert it to a scalable vector.
4505   MVT ContainerVT = VecVT;
4506   if (VecVT.isFixedLengthVector()) {
4507     ContainerVT = getContainerForFixedLengthVector(VecVT);
4508     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4509   }
4510 
4511   // If the index is 0, the vector is already in the right position.
4512   if (!isNullConstant(Idx)) {
4513     // Use a VL of 1 to avoid processing more elements than we need.
4514     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4515     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4516     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4517                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4518   }
4519 
4520   if (!EltVT.isInteger()) {
4521     // Floating-point extracts are handled in TableGen.
4522     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4523                        DAG.getConstant(0, DL, XLenVT));
4524   }
4525 
4526   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4527   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4528 }
4529 
4530 // Some RVV intrinsics may claim that they want an integer operand to be
4531 // promoted or expanded.
4532 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4533                                            const RISCVSubtarget &Subtarget) {
4534   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4535           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4536          "Unexpected opcode");
4537 
4538   if (!Subtarget.hasVInstructions())
4539     return SDValue();
4540 
4541   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4542   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4543   SDLoc DL(Op);
4544 
4545   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4546       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4547   if (!II || !II->hasScalarOperand())
4548     return SDValue();
4549 
4550   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4551   assert(SplatOp < Op.getNumOperands());
4552 
4553   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4554   SDValue &ScalarOp = Operands[SplatOp];
4555   MVT OpVT = ScalarOp.getSimpleValueType();
4556   MVT XLenVT = Subtarget.getXLenVT();
4557 
  // If this isn't a scalar, or its type is XLenVT, we're done.
4559   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4560     return SDValue();
4561 
4562   // Simplest case is that the operand needs to be promoted to XLenVT.
4563   if (OpVT.bitsLT(XLenVT)) {
4564     // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
4567     // FIXME: Should we ignore the upper bits in isel instead?
4568     unsigned ExtOpc =
4569         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4570     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4571     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4572   }
4573 
4574   // Use the previous operand to get the vXi64 VT. The result might be a mask
4575   // VT for compares. Using the previous operand assumes that the previous
4576   // operand will never have a smaller element size than a scalar operand and
4577   // that a widening operation never uses SEW=64.
4578   // NOTE: If this fails the below assert, we can probably just find the
4579   // element count from any operand or result and use it to construct the VT.
4580   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4581   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4582 
4583   // The more complex case is when the scalar is larger than XLenVT.
4584   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4585          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4586 
4587   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4588   // instruction to sign-extend since SEW>XLEN.
4589   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4590     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4591     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4592   }
4593 
4594   switch (IntNo) {
4595   case Intrinsic::riscv_vslide1up:
4596   case Intrinsic::riscv_vslide1down:
4597   case Intrinsic::riscv_vslide1up_mask:
4598   case Intrinsic::riscv_vslide1down_mask: {
4599     // We need to special case these when the scalar is larger than XLen.
4600     unsigned NumOps = Op.getNumOperands();
4601     bool IsMasked = NumOps == 7;
4602 
4603     // Convert the vector source to the equivalent nxvXi32 vector.
4604     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4605     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4606 
4607     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4608                                    DAG.getConstant(0, DL, XLenVT));
4609     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4610                                    DAG.getConstant(1, DL, XLenVT));
4611 
4612     // Double the VL since we halved SEW.
4613     SDValue AVL = getVLOperand(Op);
4614     SDValue I32VL;
4615 
4616     // Optimize for constant AVL
4617     if (isa<ConstantSDNode>(AVL)) {
4618       unsigned EltSize = VT.getScalarSizeInBits();
4619       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4620 
4621       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4622       unsigned MaxVLMAX =
4623           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4624 
4625       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4626       unsigned MinVLMAX =
4627           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4628 
4629       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4630       if (AVLInt <= MinVLMAX) {
4631         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4632       } else if (AVLInt >= 2 * MaxVLMAX) {
4633         // Just set vl to VLMAX in this situation
4634         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4635         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4636         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4637         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4638         SDValue SETVLMAX = DAG.getTargetConstant(
4639             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4640         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4641                             LMUL);
4642       } else {
        // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
        // depends on the hardware implementation, so leave I32VL unset and
        // let the vsetvli-based code below compute it.
4646       }
4647     }
4648     if (!I32VL) {
4649       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4650       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4651       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4652       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4653       SDValue SETVL =
4654           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
      // Use the vsetvli instruction to get the actual length in use, which
      // depends on the hardware implementation.
4657       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4658                                SEW, LMUL);
4659       I32VL =
4660           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4661     }
4662 
4663     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4664 
4665     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4666     // instructions.
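    // For example, for vslide1up with a 64-bit scalar X this emits,
    // viewing the SEW=64 vector as SEW=32 with twice the VL:
    //   vslide1up.vx vTmp, vSrc, X_hi   ; slide in the high word first
    //   vslide1up.vx vDst, vTmp, X_lo   ; then the low word underneath it
    // so element 0 of the i64 view becomes (X_hi << 32) | X_lo.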
4667     SDValue Passthru;
4668     if (IsMasked)
4669       Passthru = DAG.getUNDEF(I32VT);
4670     else
4671       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4672 
4673     if (IntNo == Intrinsic::riscv_vslide1up ||
4674         IntNo == Intrinsic::riscv_vslide1up_mask) {
4675       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4676                         ScalarHi, I32Mask, I32VL);
4677       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4678                         ScalarLo, I32Mask, I32VL);
4679     } else {
4680       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4681                         ScalarLo, I32Mask, I32VL);
4682       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4683                         ScalarHi, I32Mask, I32VL);
4684     }
4685 
4686     // Convert back to nxvXi64.
4687     Vec = DAG.getBitcast(VT, Vec);
4688 
4689     if (!IsMasked)
4690       return Vec;
4691     // Apply mask after the operation.
4692     SDValue Mask = Operands[NumOps - 3];
4693     SDValue MaskedOff = Operands[1];
4694     // Assume Policy operand is the last operand.
4695     uint64_t Policy =
4696         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4697     // We don't need to select maskedoff if it's undef.
4698     if (MaskedOff.isUndef())
4699       return Vec;
4700     // TAMU
4701     if (Policy == RISCVII::TAIL_AGNOSTIC)
4702       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4703                          AVL);
    // TUMA or TUMU: currently we always emit a tumu policy regardless of
    // tuma. This is fine because vmerge does not care about the mask policy.
4706     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4707                        AVL);
4708   }
4709   }
4710 
4711   // We need to convert the scalar to a splat vector.
4712   SDValue VL = getVLOperand(Op);
4713   assert(VL.getValueType() == XLenVT);
4714   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4715   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4716 }
4717 
4718 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4719                                                      SelectionDAG &DAG) const {
4720   unsigned IntNo = Op.getConstantOperandVal(0);
4721   SDLoc DL(Op);
4722   MVT XLenVT = Subtarget.getXLenVT();
4723 
4724   switch (IntNo) {
4725   default:
4726     break; // Don't custom lower most intrinsics.
4727   case Intrinsic::thread_pointer: {
4728     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4729     return DAG.getRegister(RISCV::X4, PtrVT);
4730   }
4731   case Intrinsic::riscv_orc_b:
4732   case Intrinsic::riscv_brev8: {
4733     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
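    // Both use a control value of 7, which confines the bit reversal
    // (brev8) or bit OR-combine (orc.b) to within each byte.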
4734     unsigned Opc =
4735         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4736     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4737                        DAG.getConstant(7, DL, XLenVT));
4738   }
4739   case Intrinsic::riscv_grev:
4740   case Intrinsic::riscv_gorc: {
4741     unsigned Opc =
4742         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4743     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4744   }
4745   case Intrinsic::riscv_zip:
4746   case Intrinsic::riscv_unzip: {
4747     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4748     // For i32 the immediate is 15. For i64 the immediate is 31.
4749     unsigned Opc =
4750         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4751     unsigned BitWidth = Op.getValueSizeInBits();
4752     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4753     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4754                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4755   }
4756   case Intrinsic::riscv_shfl:
4757   case Intrinsic::riscv_unshfl: {
4758     unsigned Opc =
4759         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4760     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4761   }
4762   case Intrinsic::riscv_bcompress:
4763   case Intrinsic::riscv_bdecompress: {
4764     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4765                                                        : RISCVISD::BDECOMPRESS;
4766     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4767   }
4768   case Intrinsic::riscv_bfp:
4769     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4770                        Op.getOperand(2));
4771   case Intrinsic::riscv_fsl:
4772     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4773                        Op.getOperand(2), Op.getOperand(3));
4774   case Intrinsic::riscv_fsr:
4775     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4776                        Op.getOperand(2), Op.getOperand(3));
4777   case Intrinsic::riscv_vmv_x_s:
4778     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4779     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4780                        Op.getOperand(1));
4781   case Intrinsic::riscv_vmv_v_x:
4782     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4783                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4784                             Subtarget);
4785   case Intrinsic::riscv_vfmv_v_f:
4786     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4787                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4788   case Intrinsic::riscv_vmv_s_x: {
4789     SDValue Scalar = Op.getOperand(2);
4790 
4791     if (Scalar.getValueType().bitsLE(XLenVT)) {
4792       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4793       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4794                          Op.getOperand(1), Scalar, Op.getOperand(3));
4795     }
4796 
4797     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4798 
4799     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat containing
    // the two values, which we assemble using some bit math. Next we'll use
4802     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4803     // to merge element 0 from our splat into the source vector.
4804     // FIXME: This is probably not the best way to do this, but it is
4805     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4806     // point.
4807     //   sw lo, (a0)
4808     //   sw hi, 4(a0)
4809     //   vlse vX, (a0)
4810     //
4811     //   vid.v      vVid
4812     //   vmseq.vx   mMask, vVid, 0
4813     //   vmerge.vvm vDest, vSrc, vVal, mMask
4814     MVT VT = Op.getSimpleValueType();
4815     SDValue Vec = Op.getOperand(1);
4816     SDValue VL = getVLOperand(Op);
4817 
    SDValue SplattedVal =
        splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
    if (Vec.isUndef())
4820       return SplattedVal;
4821     SDValue SplattedIdx =
4822         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4823                     DAG.getConstant(0, DL, MVT::i32), VL);
4824 
4825     MVT MaskVT = getMaskTypeFor(VT);
4826     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4827     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4828     SDValue SelectCond =
4829         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4830                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4831     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4832                        Vec, VL);
4833   }
4834   }
4835 
4836   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4837 }
4838 
4839 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4840                                                     SelectionDAG &DAG) const {
4841   unsigned IntNo = Op.getConstantOperandVal(1);
4842   switch (IntNo) {
4843   default:
4844     break;
4845   case Intrinsic::riscv_masked_strided_load: {
4846     SDLoc DL(Op);
4847     MVT XLenVT = Subtarget.getXLenVT();
4848 
4849     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4850     // the selection of the masked intrinsics doesn't do this for us.
4851     SDValue Mask = Op.getOperand(5);
4852     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4853 
4854     MVT VT = Op->getSimpleValueType(0);
4855     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4856 
4857     SDValue PassThru = Op.getOperand(2);
4858     if (!IsUnmasked) {
4859       MVT MaskVT = getMaskTypeFor(ContainerVT);
4860       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4861       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4862     }
4863 
4864     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4865 
4866     SDValue IntID = DAG.getTargetConstant(
4867         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4868         XLenVT);
4869 
4870     auto *Load = cast<MemIntrinsicSDNode>(Op);
4871     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4872     if (IsUnmasked)
4873       Ops.push_back(DAG.getUNDEF(ContainerVT));
4874     else
4875       Ops.push_back(PassThru);
4876     Ops.push_back(Op.getOperand(3)); // Ptr
4877     Ops.push_back(Op.getOperand(4)); // Stride
4878     if (!IsUnmasked)
4879       Ops.push_back(Mask);
4880     Ops.push_back(VL);
4881     if (!IsUnmasked) {
4882       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4883       Ops.push_back(Policy);
4884     }
4885 
4886     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4887     SDValue Result =
4888         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4889                                 Load->getMemoryVT(), Load->getMemOperand());
4890     SDValue Chain = Result.getValue(1);
4891     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4892     return DAG.getMergeValues({Result, Chain}, DL);
4893   }
4894   case Intrinsic::riscv_seg2_load:
4895   case Intrinsic::riscv_seg3_load:
4896   case Intrinsic::riscv_seg4_load:
4897   case Intrinsic::riscv_seg5_load:
4898   case Intrinsic::riscv_seg6_load:
4899   case Intrinsic::riscv_seg7_load:
4900   case Intrinsic::riscv_seg8_load: {
4901     SDLoc DL(Op);
4902     static const Intrinsic::ID VlsegInts[7] = {
4903         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4904         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4905         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4906         Intrinsic::riscv_vlseg8};
4907     unsigned NF = Op->getNumValues() - 1;
4908     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4909     MVT XLenVT = Subtarget.getXLenVT();
4910     MVT VT = Op->getSimpleValueType(0);
4911     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4912 
4913     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4914     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4915     auto *Load = cast<MemIntrinsicSDNode>(Op);
4916     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4917     ContainerVTs.push_back(MVT::Other);
4918     SDVTList VTs = DAG.getVTList(ContainerVTs);
4919     SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID};
4920     Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT));
4921     Ops.push_back(Op.getOperand(2));
4922     Ops.push_back(VL);
4923     SDValue Result =
4924         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4925                                 Load->getMemoryVT(), Load->getMemOperand());
4926     SmallVector<SDValue, 9> Results;
4927     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4928       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4929                                                   DAG, Subtarget));
4930     Results.push_back(Result.getValue(NF));
4931     return DAG.getMergeValues(Results, DL);
4932   }
4933   }
4934 
4935   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4936 }
4937 
4938 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4939                                                  SelectionDAG &DAG) const {
4940   unsigned IntNo = Op.getConstantOperandVal(1);
4941   switch (IntNo) {
4942   default:
4943     break;
4944   case Intrinsic::riscv_masked_strided_store: {
4945     SDLoc DL(Op);
4946     MVT XLenVT = Subtarget.getXLenVT();
4947 
4948     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4949     // the selection of the masked intrinsics doesn't do this for us.
4950     SDValue Mask = Op.getOperand(5);
4951     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4952 
4953     SDValue Val = Op.getOperand(2);
4954     MVT VT = Val.getSimpleValueType();
4955     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4956 
4957     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4958     if (!IsUnmasked) {
4959       MVT MaskVT = getMaskTypeFor(ContainerVT);
4960       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4961     }
4962 
4963     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4964 
4965     SDValue IntID = DAG.getTargetConstant(
4966         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4967         XLenVT);
4968 
4969     auto *Store = cast<MemIntrinsicSDNode>(Op);
4970     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4971     Ops.push_back(Val);
4972     Ops.push_back(Op.getOperand(3)); // Ptr
4973     Ops.push_back(Op.getOperand(4)); // Stride
4974     if (!IsUnmasked)
4975       Ops.push_back(Mask);
4976     Ops.push_back(VL);
4977 
4978     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4979                                    Ops, Store->getMemoryVT(),
4980                                    Store->getMemOperand());
4981   }
4982   }
4983 
4984   return SDValue();
4985 }
4986 
4987 static MVT getLMUL1VT(MVT VT) {
4988   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4989          "Unexpected vector MVT");
4990   return MVT::getScalableVectorVT(
4991       VT.getVectorElementType(),
4992       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4993 }
4994 
4995 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4996   switch (ISDOpcode) {
4997   default:
4998     llvm_unreachable("Unhandled reduction");
4999   case ISD::VECREDUCE_ADD:
5000     return RISCVISD::VECREDUCE_ADD_VL;
5001   case ISD::VECREDUCE_UMAX:
5002     return RISCVISD::VECREDUCE_UMAX_VL;
5003   case ISD::VECREDUCE_SMAX:
5004     return RISCVISD::VECREDUCE_SMAX_VL;
5005   case ISD::VECREDUCE_UMIN:
5006     return RISCVISD::VECREDUCE_UMIN_VL;
5007   case ISD::VECREDUCE_SMIN:
5008     return RISCVISD::VECREDUCE_SMIN_VL;
5009   case ISD::VECREDUCE_AND:
5010     return RISCVISD::VECREDUCE_AND_VL;
5011   case ISD::VECREDUCE_OR:
5012     return RISCVISD::VECREDUCE_OR_VL;
5013   case ISD::VECREDUCE_XOR:
5014     return RISCVISD::VECREDUCE_XOR_VL;
5015   }
5016 }
5017 
5018 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5019                                                          SelectionDAG &DAG,
5020                                                          bool IsVP) const {
5021   SDLoc DL(Op);
5022   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5023   MVT VecVT = Vec.getSimpleValueType();
5024   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5025           Op.getOpcode() == ISD::VECREDUCE_OR ||
5026           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5027           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5028           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5029           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5030          "Unexpected reduction lowering");
5031 
5032   MVT XLenVT = Subtarget.getXLenVT();
5033   assert(Op.getValueType() == XLenVT &&
5034          "Expected reduction output to be legalized to XLenVT");
5035 
5036   MVT ContainerVT = VecVT;
5037   if (VecVT.isFixedLengthVector()) {
5038     ContainerVT = getContainerForFixedLengthVector(VecVT);
5039     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5040   }
5041 
5042   SDValue Mask, VL;
5043   if (IsVP) {
5044     Mask = Op.getOperand(2);
5045     VL = Op.getOperand(3);
5046   } else {
5047     std::tie(Mask, VL) =
5048         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5049   }
5050 
5051   unsigned BaseOpc;
5052   ISD::CondCode CC;
5053   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5054 
5055   switch (Op.getOpcode()) {
5056   default:
5057     llvm_unreachable("Unhandled reduction");
5058   case ISD::VECREDUCE_AND:
5059   case ISD::VP_REDUCE_AND: {
5060     // vcpop ~x == 0
5061     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5062     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5063     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5064     CC = ISD::SETEQ;
5065     BaseOpc = ISD::AND;
5066     break;
5067   }
5068   case ISD::VECREDUCE_OR:
5069   case ISD::VP_REDUCE_OR:
5070     // vcpop x != 0
5071     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5072     CC = ISD::SETNE;
5073     BaseOpc = ISD::OR;
5074     break;
5075   case ISD::VECREDUCE_XOR:
5076   case ISD::VP_REDUCE_XOR: {
5077     // ((vcpop x) & 1) != 0
5078     SDValue One = DAG.getConstant(1, DL, XLenVT);
5079     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5080     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5081     CC = ISD::SETNE;
5082     BaseOpc = ISD::XOR;
5083     break;
5084   }
5085   }
5086 
5087   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5088 
5089   if (!IsVP)
5090     return SetCC;
5091 
5092   // Now include the start value in the operation.
5093   // Note that we must return the start value when no elements are operated
5094   // upon. The vcpop instructions we've emitted in each case above will return
5095   // 0 for an inactive vector, and so we've already received the neutral value:
5096   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5097   // can simply include the start value.
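  // For example, a vp.reduce.and with EVL=0 gives vcpop()==0 -> true, and
  // ANDing true with the start value returns the start value unchanged.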
5098   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5099 }
5100 
5101 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5102                                             SelectionDAG &DAG) const {
5103   SDLoc DL(Op);
5104   SDValue Vec = Op.getOperand(0);
5105   EVT VecEVT = Vec.getValueType();
5106 
5107   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5108 
5109   // Due to ordering in legalize types we may have a vector type that needs to
5110   // be split. Do that manually so we can get down to a legal type.
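  // For example, an illegal v128i8 reduction may be folded in half repeatedly
  // (v64i8, v32i8, ...) until the resulting vector type becomes legal.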
5111   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5112          TargetLowering::TypeSplitVector) {
5113     SDValue Lo, Hi;
5114     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5115     VecEVT = Lo.getValueType();
5116     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5117   }
5118 
5119   // TODO: The type may need to be widened rather than split. Or widened before
5120   // it can be split.
5121   if (!isTypeLegal(VecEVT))
5122     return SDValue();
5123 
5124   MVT VecVT = VecEVT.getSimpleVT();
5125   MVT VecEltVT = VecVT.getVectorElementType();
5126   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5127 
5128   MVT ContainerVT = VecVT;
5129   if (VecVT.isFixedLengthVector()) {
5130     ContainerVT = getContainerForFixedLengthVector(VecVT);
5131     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5132   }
5133 
5134   MVT M1VT = getLMUL1VT(ContainerVT);
5135   MVT XLenVT = Subtarget.getXLenVT();
5136 
5137   SDValue Mask, VL;
5138   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5139 
5140   SDValue NeutralElem =
5141       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5142   SDValue IdentitySplat =
5143       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5144                        M1VT, DL, DAG, Subtarget);
5145   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5146                                   IdentitySplat, Mask, VL);
5147   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5148                              DAG.getConstant(0, DL, XLenVT));
5149   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5150 }
5151 
5152 // Given a reduction op, this function returns the matching reduction opcode,
5153 // the vector SDValue and the scalar SDValue required to lower this to a
5154 // RISCVISD node.
5155 static std::tuple<unsigned, SDValue, SDValue>
5156 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5157   SDLoc DL(Op);
5158   auto Flags = Op->getFlags();
5159   unsigned Opcode = Op.getOpcode();
5160   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5161   switch (Opcode) {
5162   default:
5163     llvm_unreachable("Unhandled reduction");
5164   case ISD::VECREDUCE_FADD: {
5165     // Use positive zero if we can. It is cheaper to materialize.
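    // (+0.0 is not a true identity: (+0.0) + (-0.0) is +0.0, so it is only
    // safe once the no-signed-zeros flag says the sign of zero is ignorable.)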
5166     SDValue Zero =
5167         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5168     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5169   }
5170   case ISD::VECREDUCE_SEQ_FADD:
5171     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5172                            Op.getOperand(0));
5173   case ISD::VECREDUCE_FMIN:
5174     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5175                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5176   case ISD::VECREDUCE_FMAX:
5177     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5178                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5179   }
5180 }
5181 
5182 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5183                                               SelectionDAG &DAG) const {
5184   SDLoc DL(Op);
5185   MVT VecEltVT = Op.getSimpleValueType();
5186 
5187   unsigned RVVOpcode;
5188   SDValue VectorVal, ScalarVal;
5189   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5190       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5191   MVT VecVT = VectorVal.getSimpleValueType();
5192 
5193   MVT ContainerVT = VecVT;
5194   if (VecVT.isFixedLengthVector()) {
5195     ContainerVT = getContainerForFixedLengthVector(VecVT);
5196     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5197   }
5198 
5199   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5200   MVT XLenVT = Subtarget.getXLenVT();
5201 
5202   SDValue Mask, VL;
5203   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5204 
5205   SDValue ScalarSplat =
5206       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5207                        M1VT, DL, DAG, Subtarget);
5208   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5209                                   VectorVal, ScalarSplat, Mask, VL);
5210   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5211                      DAG.getConstant(0, DL, XLenVT));
5212 }
5213 
5214 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5215   switch (ISDOpcode) {
5216   default:
5217     llvm_unreachable("Unhandled reduction");
5218   case ISD::VP_REDUCE_ADD:
5219     return RISCVISD::VECREDUCE_ADD_VL;
5220   case ISD::VP_REDUCE_UMAX:
5221     return RISCVISD::VECREDUCE_UMAX_VL;
5222   case ISD::VP_REDUCE_SMAX:
5223     return RISCVISD::VECREDUCE_SMAX_VL;
5224   case ISD::VP_REDUCE_UMIN:
5225     return RISCVISD::VECREDUCE_UMIN_VL;
5226   case ISD::VP_REDUCE_SMIN:
5227     return RISCVISD::VECREDUCE_SMIN_VL;
5228   case ISD::VP_REDUCE_AND:
5229     return RISCVISD::VECREDUCE_AND_VL;
5230   case ISD::VP_REDUCE_OR:
5231     return RISCVISD::VECREDUCE_OR_VL;
5232   case ISD::VP_REDUCE_XOR:
5233     return RISCVISD::VECREDUCE_XOR_VL;
5234   case ISD::VP_REDUCE_FADD:
5235     return RISCVISD::VECREDUCE_FADD_VL;
5236   case ISD::VP_REDUCE_SEQ_FADD:
5237     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5238   case ISD::VP_REDUCE_FMAX:
5239     return RISCVISD::VECREDUCE_FMAX_VL;
5240   case ISD::VP_REDUCE_FMIN:
5241     return RISCVISD::VECREDUCE_FMIN_VL;
5242   }
5243 }
5244 
5245 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5246                                            SelectionDAG &DAG) const {
5247   SDLoc DL(Op);
5248   SDValue Vec = Op.getOperand(1);
5249   EVT VecEVT = Vec.getValueType();
5250 
5251   // TODO: The type may need to be widened rather than split. Or widened before
5252   // it can be split.
5253   if (!isTypeLegal(VecEVT))
5254     return SDValue();
5255 
5256   MVT VecVT = VecEVT.getSimpleVT();
5257   MVT VecEltVT = VecVT.getVectorElementType();
5258   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5259 
5260   MVT ContainerVT = VecVT;
5261   if (VecVT.isFixedLengthVector()) {
5262     ContainerVT = getContainerForFixedLengthVector(VecVT);
5263     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5264   }
5265 
5266   SDValue VL = Op.getOperand(3);
5267   SDValue Mask = Op.getOperand(2);
5268 
5269   MVT M1VT = getLMUL1VT(ContainerVT);
5270   MVT XLenVT = Subtarget.getXLenVT();
5271   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5272 
5273   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5274                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5275                                         DL, DAG, Subtarget);
5276   SDValue Reduction =
5277       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5278   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5279                              DAG.getConstant(0, DL, XLenVT));
5280   if (!VecVT.isInteger())
5281     return Elt0;
5282   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5283 }
5284 
5285 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5286                                                    SelectionDAG &DAG) const {
5287   SDValue Vec = Op.getOperand(0);
5288   SDValue SubVec = Op.getOperand(1);
5289   MVT VecVT = Vec.getSimpleValueType();
5290   MVT SubVecVT = SubVec.getSimpleValueType();
5291 
5292   SDLoc DL(Op);
5293   MVT XLenVT = Subtarget.getXLenVT();
5294   unsigned OrigIdx = Op.getConstantOperandVal(2);
5295   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5296 
5297   // We don't have the ability to slide mask vectors up indexed by their i1
5298   // elements; the smallest we can do is i8. Often we are able to bitcast to
5299   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5300   // into a scalable one, we might not necessarily have enough scalable
5301   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
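  // For example, inserting a v8i1 at index 8 of an nxv16i1 is re-expressed
  // below as inserting a v1i8 at index 1 of an nxv2i8.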
5302   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5303       (OrigIdx != 0 || !Vec.isUndef())) {
5304     if (VecVT.getVectorMinNumElements() >= 8 &&
5305         SubVecVT.getVectorMinNumElements() >= 8) {
5306       assert(OrigIdx % 8 == 0 && "Invalid index");
5307       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5308              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5309              "Unexpected mask vector lowering");
5310       OrigIdx /= 8;
5311       SubVecVT =
5312           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5313                            SubVecVT.isScalableVector());
5314       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5315                                VecVT.isScalableVector());
5316       Vec = DAG.getBitcast(VecVT, Vec);
5317       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5318     } else {
5319       // We can't slide this mask vector up indexed by its i1 elements.
5320       // This poses a problem when we wish to insert a scalable vector which
5321       // can't be re-expressed as a larger type. Just choose the slow path and
5322       // extend to a larger type, then truncate back down.
5323       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5324       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5325       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5326       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5327       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5328                         Op.getOperand(2));
5329       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5330       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5331     }
5332   }
5333 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group up the full
  // amount.
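  // For example, inserting a v2i32 at index 2 of a v4i32 becomes a vslideup
  // by 2 with VL=4 on the container type.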
5339   if (SubVecVT.isFixedLengthVector()) {
5340     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5341       return Op;
5342     MVT ContainerVT = VecVT;
5343     if (VecVT.isFixedLengthVector()) {
5344       ContainerVT = getContainerForFixedLengthVector(VecVT);
5345       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5346     }
5347     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5348                          DAG.getUNDEF(ContainerVT), SubVec,
5349                          DAG.getConstant(0, DL, XLenVT));
5350     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5351       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5352       return DAG.getBitcast(Op.getValueType(), SubVec);
5353     }
5354     SDValue Mask =
5355         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5356     // Set the vector length to only the number of elements we care about. Note
5357     // that for slideup this includes the offset.
5358     SDValue VL =
5359         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5360     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5361     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5362                                   SubVec, SlideupAmt, Mask, VL);
5363     if (VecVT.isFixedLengthVector())
5364       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5365     return DAG.getBitcast(Op.getValueType(), Slideup);
5366   }
5367 
5368   unsigned SubRegIdx, RemIdx;
5369   std::tie(SubRegIdx, RemIdx) =
5370       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5371           VecVT, SubVecVT, OrigIdx, TRI);
5372 
5373   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5374   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5375                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5376                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5377 
5378   // 1. If the Idx has been completely eliminated and this subvector's size is
5379   // a vector register or a multiple thereof, or the surrounding elements are
5380   // undef, then this is a subvector insert which naturally aligns to a vector
5381   // register. These can easily be handled using subregister manipulation.
5382   // 2. If the subvector is smaller than a vector register, then the insertion
5383   // must preserve the undisturbed elements of the register. We do this by
5384   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5385   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5386   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5387   // LMUL=1 type back into the larger vector (resolving to another subregister
5388   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5389   // to avoid allocating a large register group to hold our subvector.
5390   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5391     return Op;
5392 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
5395   // (in our case undisturbed). This means we can set up a subvector insertion
5396   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5397   // size of the subvector.
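  // For example, inserting an nxv1i32 at index 1 of an nxv2i32 slides the
  // subvector up by 1*vscale with VL = 2*vscale (the offset plus the
  // subvector's size).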
5398   MVT InterSubVT = VecVT;
5399   SDValue AlignedExtract = Vec;
5400   unsigned AlignedIdx = OrigIdx - RemIdx;
5401   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5402     InterSubVT = getLMUL1VT(VecVT);
5403     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
5405     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5406                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5407   }
5408 
5409   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5410   // For scalable vectors this must be further multiplied by vscale.
5411   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5412 
5413   SDValue Mask, VL;
5414   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5415 
5416   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5417   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5418   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5419   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5420 
5421   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5422                        DAG.getUNDEF(InterSubVT), SubVec,
5423                        DAG.getConstant(0, DL, XLenVT));
5424 
5425   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5426                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5427 
5428   // If required, insert this subvector back into the correct vector register.
5429   // This should resolve to an INSERT_SUBREG instruction.
5430   if (VecVT.bitsGT(InterSubVT))
5431     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5432                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5433 
5434   // We might have bitcast from a mask type: cast back to the original type if
5435   // required.
5436   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5437 }
5438 
5439 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5440                                                     SelectionDAG &DAG) const {
5441   SDValue Vec = Op.getOperand(0);
5442   MVT SubVecVT = Op.getSimpleValueType();
5443   MVT VecVT = Vec.getSimpleValueType();
5444 
5445   SDLoc DL(Op);
5446   MVT XLenVT = Subtarget.getXLenVT();
5447   unsigned OrigIdx = Op.getConstantOperandVal(1);
5448   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5449 
5450   // We don't have the ability to slide mask vectors down indexed by their i1
5451   // elements; the smallest we can do is i8. Often we are able to bitcast to
5452   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5453   // from a scalable one, we might not necessarily have enough scalable
5454   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
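  // For example, extracting a v8i1 at index 16 from an nxv32i1 is
  // re-expressed below as extracting a v1i8 at index 2 from an nxv4i8.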
5455   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5456     if (VecVT.getVectorMinNumElements() >= 8 &&
5457         SubVecVT.getVectorMinNumElements() >= 8) {
5458       assert(OrigIdx % 8 == 0 && "Invalid index");
5459       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5460              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5461              "Unexpected mask vector lowering");
5462       OrigIdx /= 8;
5463       SubVecVT =
5464           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5465                            SubVecVT.isScalableVector());
5466       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5467                                VecVT.isScalableVector());
5468       Vec = DAG.getBitcast(VecVT, Vec);
5469     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
      // This poses a problem when we wish to extract a scalable vector which
      // can't be re-expressed as a larger type. Just choose the slow path and
      // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting a fixed-length
      // vector from another fixed-length vector, where we can extract as i8
      // and shift the correct element right to reach the desired subvector.
5477       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5478       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5479       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5480       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5481                         Op.getOperand(1));
5482       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5483       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5484     }
5485   }
5486 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group down the full
  // amount.
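  // For example, extracting a v2i32 at index 2 becomes a vslidedown by 2 with
  // VL=2 followed by a cast-like subvector extract at index 0.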
5492   if (SubVecVT.isFixedLengthVector()) {
5493     // With an index of 0 this is a cast-like subvector, which can be performed
5494     // with subregister operations.
5495     if (OrigIdx == 0)
5496       return Op;
5497     MVT ContainerVT = VecVT;
5498     if (VecVT.isFixedLengthVector()) {
5499       ContainerVT = getContainerForFixedLengthVector(VecVT);
5500       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5501     }
5502     SDValue Mask =
5503         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5504     // Set the vector length to only the number of elements we care about. This
5505     // avoids sliding down elements we're going to discard straight away.
5506     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5507     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5508     SDValue Slidedown =
5509         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5510                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5511     // Now we can use a cast-like subvector extract to get the result.
5512     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5513                             DAG.getConstant(0, DL, XLenVT));
5514     return DAG.getBitcast(Op.getValueType(), Slidedown);
5515   }
5516 
5517   unsigned SubRegIdx, RemIdx;
5518   std::tie(SubRegIdx, RemIdx) =
5519       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5520           VecVT, SubVecVT, OrigIdx, TRI);
5521 
5522   // If the Idx has been completely eliminated then this is a subvector extract
5523   // which naturally aligns to a vector register. These can easily be handled
5524   // using subregister manipulation.
5525   if (RemIdx == 0)
5526     return Op;
5527 
5528   // Else we must shift our vector register directly to extract the subvector.
5529   // Do this using VSLIDEDOWN.
5530 
  // If the vector type is an LMUL-group type, extract a subvector equal to
  // the nearest full vector register type. This should resolve to an
  // EXTRACT_SUBREG instruction.
5534   MVT InterSubVT = VecVT;
5535   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5536     InterSubVT = getLMUL1VT(VecVT);
5537     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5538                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5539   }
5540 
5541   // Slide this vector register down by the desired number of elements in order
5542   // to place the desired subvector starting at element 0.
5543   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5544   // For scalable vectors this must be further multiplied by vscale.
5545   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5546 
5547   SDValue Mask, VL;
5548   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5549   SDValue Slidedown =
5550       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5551                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5552 
5553   // Now the vector is in the right position, extract our final subvector. This
5554   // should resolve to a COPY.
5555   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5556                           DAG.getConstant(0, DL, XLenVT));
5557 
5558   // We might have bitcast from a mask type: cast back to the original type if
5559   // required.
5560   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5561 }
5562 
// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
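// For example, a step of 4 lowers to (vid.v << 2), while a step of 3 lowers
// to (vid.v * splat(3)).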
5565 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5566                                               SelectionDAG &DAG) const {
5567   SDLoc DL(Op);
5568   MVT VT = Op.getSimpleValueType();
5569   MVT XLenVT = Subtarget.getXLenVT();
5570   SDValue Mask, VL;
5571   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5572   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5573   uint64_t StepValImm = Op.getConstantOperandVal(0);
5574   if (StepValImm != 1) {
5575     if (isPowerOf2_64(StepValImm)) {
      SDValue StepVal =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                      DAG.getConstant(Log2_64(StepValImm), DL, XLenVT), VL);
5579       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5580     } else {
5581       SDValue StepVal = lowerScalarSplat(
5582           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5583           VL, VT, DL, DAG, Subtarget);
5584       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5585     }
5586   }
5587   return StepVec;
5588 }
5589 
5590 // Implement vector_reverse using vrgather.vv with indices determined by
5591 // subtracting the id of each element from (VLMAX-1). This will convert
5592 // the indices like so:
5593 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5594 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5595 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5596                                                  SelectionDAG &DAG) const {
5597   SDLoc DL(Op);
5598   MVT VecVT = Op.getSimpleValueType();
5599   unsigned EltSize = VecVT.getScalarSizeInBits();
5600   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5601 
5602   unsigned MaxVLMAX = 0;
5603   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5604   if (VectorBitsMax != 0)
5605     MaxVLMAX =
5606         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5607 
5608   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5609   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5610 
5611   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5612   // to use vrgatherei16.vv.
5613   // TODO: It's also possible to use vrgatherei16.vv for other types to
5614   // decrease register width for the index calculation.
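  // For example, with SEW=8 and LMUL=8 a machine with VLEN=512 has VLMAX=512,
  // which cannot be represented in an i8 index element (maximum 255).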
5615   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
5620     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5621       SDValue Lo, Hi;
5622       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5623       EVT LoVT, HiVT;
5624       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5625       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5626       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5627       // Reassemble the low and high pieces reversed.
5628       // FIXME: This is a CONCAT_VECTORS.
5629       SDValue Res =
5630           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5631                       DAG.getIntPtrConstant(0, DL));
5632       return DAG.getNode(
5633           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5634           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5635     }
5636 
5637     // Just promote the int type to i16 which will double the LMUL.
5638     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5639     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5640   }
5641 
5642   MVT XLenVT = Subtarget.getXLenVT();
5643   SDValue Mask, VL;
5644   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5645 
5646   // Calculate VLMAX-1 for the desired SEW.
5647   unsigned MinElts = VecVT.getVectorMinNumElements();
5648   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5649                               DAG.getConstant(MinElts, DL, XLenVT));
5650   SDValue VLMinus1 =
5651       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5652 
5653   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5654   bool IsRV32E64 =
5655       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5656   SDValue SplatVL;
5657   if (!IsRV32E64)
5658     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5659   else
5660     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5661                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5662 
5663   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5664   SDValue Indices =
5665       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5666 
5667   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5668 }
5669 
5670 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5671                                                 SelectionDAG &DAG) const {
5672   SDLoc DL(Op);
5673   SDValue V1 = Op.getOperand(0);
5674   SDValue V2 = Op.getOperand(1);
5675   MVT XLenVT = Subtarget.getXLenVT();
5676   MVT VecVT = Op.getSimpleValueType();
5677 
5678   unsigned MinElts = VecVT.getVectorMinNumElements();
5679   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5680                               DAG.getConstant(MinElts, DL, XLenVT));
5681 
5682   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5683   SDValue DownOffset, UpOffset;
5684   if (ImmValue >= 0) {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant.
5687     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5688     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5689   } else {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant rather than negating the original operand.
5692     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5693     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5694   }
5695 
5696   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5697 
5698   SDValue SlideDown =
5699       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5700                   DownOffset, TrueMask, UpOffset);
5701   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5702                      TrueMask,
5703                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5704 }
5705 
5706 SDValue
5707 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5708                                                      SelectionDAG &DAG) const {
5709   SDLoc DL(Op);
5710   auto *Load = cast<LoadSDNode>(Op);
5711 
5712   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5713                                         Load->getMemoryVT(),
5714                                         *Load->getMemOperand()) &&
5715          "Expecting a correctly-aligned load");
5716 
5717   MVT VT = Op.getSimpleValueType();
5718   MVT XLenVT = Subtarget.getXLenVT();
5719   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5720 
5721   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5722 
5723   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5724   SDValue IntID = DAG.getTargetConstant(
5725       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5726   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5727   if (!IsMaskOp)
5728     Ops.push_back(DAG.getUNDEF(ContainerVT));
5729   Ops.push_back(Load->getBasePtr());
5730   Ops.push_back(VL);
5731   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5732   SDValue NewLoad =
5733       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5734                               Load->getMemoryVT(), Load->getMemOperand());
5735 
5736   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5737   return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
5738 }
5739 
5740 SDValue
5741 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5742                                                       SelectionDAG &DAG) const {
5743   SDLoc DL(Op);
5744   auto *Store = cast<StoreSDNode>(Op);
5745 
5746   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5747                                         Store->getMemoryVT(),
5748                                         *Store->getMemOperand()) &&
5749          "Expecting a correctly-aligned store");
5750 
5751   SDValue StoreVal = Store->getValue();
5752   MVT VT = StoreVal.getSimpleValueType();
5753   MVT XLenVT = Subtarget.getXLenVT();
5754 
  // If the size is less than a byte, pad with zeros to make a full byte.
5756   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5757     VT = MVT::v8i1;
5758     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5759                            DAG.getConstant(0, DL, VT), StoreVal,
5760                            DAG.getIntPtrConstant(0, DL));
5761   }
5762 
5763   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5764 
5765   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5766 
5767   SDValue NewValue =
5768       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5769 
5770   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5771   SDValue IntID = DAG.getTargetConstant(
5772       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5773   return DAG.getMemIntrinsicNode(
5774       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5775       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5776       Store->getMemoryVT(), Store->getMemOperand());
5777 }
5778 
5779 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5780                                              SelectionDAG &DAG) const {
5781   SDLoc DL(Op);
5782   MVT VT = Op.getSimpleValueType();
5783 
5784   const auto *MemSD = cast<MemSDNode>(Op);
5785   EVT MemVT = MemSD->getMemoryVT();
5786   MachineMemOperand *MMO = MemSD->getMemOperand();
5787   SDValue Chain = MemSD->getChain();
5788   SDValue BasePtr = MemSD->getBasePtr();
5789 
5790   SDValue Mask, PassThru, VL;
5791   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5792     Mask = VPLoad->getMask();
5793     PassThru = DAG.getUNDEF(VT);
5794     VL = VPLoad->getVectorLength();
5795   } else {
5796     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5797     Mask = MLoad->getMask();
5798     PassThru = MLoad->getPassThru();
5799   }
5800 
5801   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5802 
5803   MVT XLenVT = Subtarget.getXLenVT();
5804 
5805   MVT ContainerVT = VT;
5806   if (VT.isFixedLengthVector()) {
5807     ContainerVT = getContainerForFixedLengthVector(VT);
5808     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5809     if (!IsUnmasked) {
5810       MVT MaskVT = getMaskTypeFor(ContainerVT);
5811       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5812     }
5813   }
5814 
5815   if (!VL)
5816     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5817 
5818   unsigned IntID =
5819       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5820   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5821   if (IsUnmasked)
5822     Ops.push_back(DAG.getUNDEF(ContainerVT));
5823   else
5824     Ops.push_back(PassThru);
5825   Ops.push_back(BasePtr);
5826   if (!IsUnmasked)
5827     Ops.push_back(Mask);
5828   Ops.push_back(VL);
5829   if (!IsUnmasked)
5830     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5831 
5832   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5833 
5834   SDValue Result =
5835       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5836   Chain = Result.getValue(1);
5837 
5838   if (VT.isFixedLengthVector())
5839     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5840 
5841   return DAG.getMergeValues({Result, Chain}, DL);
5842 }
5843 
5844 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5845                                               SelectionDAG &DAG) const {
5846   SDLoc DL(Op);
5847 
5848   const auto *MemSD = cast<MemSDNode>(Op);
5849   EVT MemVT = MemSD->getMemoryVT();
5850   MachineMemOperand *MMO = MemSD->getMemOperand();
5851   SDValue Chain = MemSD->getChain();
5852   SDValue BasePtr = MemSD->getBasePtr();
5853   SDValue Val, Mask, VL;
5854 
5855   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5856     Val = VPStore->getValue();
5857     Mask = VPStore->getMask();
5858     VL = VPStore->getVectorLength();
5859   } else {
5860     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5861     Val = MStore->getValue();
5862     Mask = MStore->getMask();
5863   }
5864 
5865   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5866 
5867   MVT VT = Val.getSimpleValueType();
5868   MVT XLenVT = Subtarget.getXLenVT();
5869 
5870   MVT ContainerVT = VT;
5871   if (VT.isFixedLengthVector()) {
5872     ContainerVT = getContainerForFixedLengthVector(VT);
5873 
5874     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5875     if (!IsUnmasked) {
5876       MVT MaskVT = getMaskTypeFor(ContainerVT);
5877       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5878     }
5879   }
5880 
5881   if (!VL)
5882     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5883 
5884   unsigned IntID =
5885       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5886   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5887   Ops.push_back(Val);
5888   Ops.push_back(BasePtr);
5889   if (!IsUnmasked)
5890     Ops.push_back(Mask);
5891   Ops.push_back(VL);
5892 
5893   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5894                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5895 }
5896 
5897 SDValue
5898 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5899                                                       SelectionDAG &DAG) const {
5900   MVT InVT = Op.getOperand(0).getSimpleValueType();
5901   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5902 
5903   MVT VT = Op.getSimpleValueType();
5904 
5905   SDValue Op1 =
5906       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5907   SDValue Op2 =
5908       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5909 
5910   SDLoc DL(Op);
5911   SDValue VL =
5912       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5913 
5914   MVT MaskVT = getMaskTypeFor(ContainerVT);
5915   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5916 
5917   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5918                             Op.getOperand(2), Mask, VL);
5919 
5920   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5921 }
5922 
5923 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5924     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5925   MVT VT = Op.getSimpleValueType();
5926 
5927   if (VT.getVectorElementType() == MVT::i1)
5928     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5929 
5930   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5931 }
5932 
5933 SDValue
5934 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5935                                                       SelectionDAG &DAG) const {
5936   unsigned Opc;
5937   switch (Op.getOpcode()) {
5938   default: llvm_unreachable("Unexpected opcode!");
5939   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5940   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5941   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5942   }
5943 
5944   return lowerToScalableOp(Op, DAG, Opc);
5945 }
5946 
5947 // Lower vector ABS to smax(X, sub(0, X)).
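// For example, abs(<-3, 4>) == smax(<-3, 4>, <3, -4>) == <3, 4>. INT_MIN
// negates to itself, so smax returns INT_MIN, matching llvm.abs semantics
// when its int-min-is-poison flag is false.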
5948 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5949   SDLoc DL(Op);
5950   MVT VT = Op.getSimpleValueType();
5951   SDValue X = Op.getOperand(0);
5952 
5953   assert(VT.isFixedLengthVector() && "Unexpected type");
5954 
5955   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5956   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5957 
5958   SDValue Mask, VL;
5959   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5960 
  SDValue SplatZero = DAG.getNode(
      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
5964   SDValue NegX =
5965       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5966   SDValue Max =
5967       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5968 
5969   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5970 }
5971 
5972 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5973     SDValue Op, SelectionDAG &DAG) const {
5974   SDLoc DL(Op);
5975   MVT VT = Op.getSimpleValueType();
5976   SDValue Mag = Op.getOperand(0);
5977   SDValue Sign = Op.getOperand(1);
5978   assert(Mag.getValueType() == Sign.getValueType() &&
5979          "Can only handle COPYSIGN with matching types.");
5980 
5981   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5982   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5983   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5984 
5985   SDValue Mask, VL;
5986   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5987 
5988   SDValue CopySign =
5989       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5990 
5991   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5992 }
5993 
5994 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5995     SDValue Op, SelectionDAG &DAG) const {
5996   MVT VT = Op.getSimpleValueType();
5997   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5998 
5999   MVT I1ContainerVT =
6000       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6001 
6002   SDValue CC =
6003       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6004   SDValue Op1 =
6005       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6006   SDValue Op2 =
6007       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6008 
6009   SDLoc DL(Op);
6010   SDValue Mask, VL;
6011   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6012 
6013   SDValue Select =
6014       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6015 
6016   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6017 }
6018 
6019 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6020                                                unsigned NewOpc,
6021                                                bool HasMask) const {
6022   MVT VT = Op.getSimpleValueType();
6023   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6024 
6025   // Create list of operands by converting existing ones to scalable types.
6026   SmallVector<SDValue, 6> Ops;
6027   for (const SDValue &V : Op->op_values()) {
6028     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6029 
6030     // Pass through non-vector operands.
6031     if (!V.getValueType().isVector()) {
6032       Ops.push_back(V);
6033       continue;
6034     }
6035 
6036     // "cast" fixed length vector to a scalable vector.
6037     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6038            "Only fixed length vectors are supported!");
6039     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6040   }
6041 
6042   SDLoc DL(Op);
6043   SDValue Mask, VL;
6044   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6045   if (HasMask)
6046     Ops.push_back(Mask);
6047   Ops.push_back(VL);
6048 
6049   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6050   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6051 }
6052 
6053 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6054 // * Operands of each node are assumed to be in the same order.
6055 // * The EVL operand is promoted from i32 to i64 on RV64.
6056 // * Fixed-length vectors are converted to their scalable-vector container
6057 //   types.
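// For example, with the default 128-bit minimum VLEN, a v4i32 vp.add becomes
// a RISCVISD::ADD_VL on the nxv2i32 container and is converted back to
// v4i32 afterwards.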
6058 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6059                                        unsigned RISCVISDOpc) const {
6060   SDLoc DL(Op);
6061   MVT VT = Op.getSimpleValueType();
6062   SmallVector<SDValue, 4> Ops;
6063 
6064   for (const auto &OpIdx : enumerate(Op->ops())) {
6065     SDValue V = OpIdx.value();
6066     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6067     // Pass through operands which aren't fixed-length vectors.
6068     if (!V.getValueType().isFixedLengthVector()) {
6069       Ops.push_back(V);
6070       continue;
6071     }
6072     // "cast" fixed length vector to a scalable vector.
6073     MVT OpVT = V.getSimpleValueType();
6074     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6075     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6076            "Only fixed length vectors are supported!");
6077     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6078   }
6079 
6080   if (!VT.isFixedLengthVector())
6081     return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
6082 
6083   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6084 
6085   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
6086 
6087   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6088 }
6089 
6090 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6091                                               SelectionDAG &DAG) const {
6092   SDLoc DL(Op);
6093   MVT VT = Op.getSimpleValueType();
6094 
6095   SDValue Src = Op.getOperand(0);
6096   // NOTE: Mask is dropped.
6097   SDValue VL = Op.getOperand(2);
6098 
6099   MVT ContainerVT = VT;
6100   if (VT.isFixedLengthVector()) {
6101     ContainerVT = getContainerForFixedLengthVector(VT);
6102     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6103     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6104   }
6105 
6106   MVT XLenVT = Subtarget.getXLenVT();
6107   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6108   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6109                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6110 
6111   SDValue SplatValue = DAG.getConstant(
6112       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
6113   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6114                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6115 
6116   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6117                                Splat, ZeroSplat, VL);
6118   if (!VT.isFixedLengthVector())
6119     return Result;
6120   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6121 }
6122 
6123 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6124                                                 SelectionDAG &DAG) const {
6125   SDLoc DL(Op);
6126   MVT VT = Op.getSimpleValueType();
6127 
6128   SDValue Op1 = Op.getOperand(0);
6129   SDValue Op2 = Op.getOperand(1);
6130   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6131   // NOTE: Mask is dropped.
6132   SDValue VL = Op.getOperand(4);
6133 
6134   MVT ContainerVT = VT;
6135   if (VT.isFixedLengthVector()) {
6136     ContainerVT = getContainerForFixedLengthVector(VT);
6137     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6138     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6139   }
6140 
6141   SDValue Result;
6142   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
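  // Each comparison below treats the i1 lanes as single bits, so every
  // condition reduces to at most two mask-logic operations; "X == 1" in the
  // comments means the lane of X is true.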
6143 
6144   switch (Condition) {
6145   default:
6146     break;
6147   // X != Y  --> (X^Y)
6148   case ISD::SETNE:
6149     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6150     break;
6151   // X == Y  --> ~(X^Y)
6152   case ISD::SETEQ: {
6153     SDValue Temp =
6154         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6155     Result =
6156         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6157     break;
6158   }
6159   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6160   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6161   case ISD::SETGT:
6162   case ISD::SETULT: {
6163     SDValue Temp =
6164         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6165     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6166     break;
6167   }
6168   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6169   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6170   case ISD::SETLT:
6171   case ISD::SETUGT: {
6172     SDValue Temp =
6173         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6174     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6175     break;
6176   }
6177   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6178   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6179   case ISD::SETGE:
6180   case ISD::SETULE: {
6181     SDValue Temp =
6182         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6183     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6184     break;
6185   }
6186   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6187   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6188   case ISD::SETLE:
6189   case ISD::SETUGE: {
6190     SDValue Temp =
6191         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6192     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6193     break;
6194   }
6195   }
6196 
6197   if (!VT.isFixedLengthVector())
6198     return Result;
6199   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6200 }
6201 
6202 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6203 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6204                                                 unsigned RISCVISDOpc) const {
6205   SDLoc DL(Op);
6206 
6207   SDValue Src = Op.getOperand(0);
6208   SDValue Mask = Op.getOperand(1);
6209   SDValue VL = Op.getOperand(2);
6210 
6211   MVT DstVT = Op.getSimpleValueType();
6212   MVT SrcVT = Src.getSimpleValueType();
6213   if (DstVT.isFixedLengthVector()) {
6214     DstVT = getContainerForFixedLengthVector(DstVT);
6215     SrcVT = getContainerForFixedLengthVector(SrcVT);
6216     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6217     MVT MaskVT = getMaskTypeFor(DstVT);
6218     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6219   }
6220 
6221   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6222                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6223                                 ? RISCVISD::VSEXT_VL
6224                                 : RISCVISD::VZEXT_VL;
6225 
6226   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6227   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6228 
6229   SDValue Result;
6230   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6231     if (SrcVT.isInteger()) {
6232       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6233 
6234       // Do we need to do any pre-widening before converting?
6235       if (SrcEltSize == 1) {
6236         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6237         MVT XLenVT = Subtarget.getXLenVT();
6238         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6239         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6240                                         DAG.getUNDEF(IntVT), Zero, VL);
6241         SDValue One = DAG.getConstant(
6242             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6243         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6244                                        DAG.getUNDEF(IntVT), One, VL);
6245         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6246                           ZeroSplat, VL);
6247       } else if (DstEltSize > (2 * SrcEltSize)) {
6248         // Widen before converting.
6249         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6250                                      DstVT.getVectorElementCount());
6251         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6252       }
6253 
6254       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6255     } else {
6256       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6257              "Wrong input/output vector types");
6258 
6259       // Convert f16 to f32 then convert f32 to i64.
6260       if (DstEltSize > (2 * SrcEltSize)) {
6261         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6262         MVT InterimFVT =
6263             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6264         Src =
6265             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6266       }
6267 
6268       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6269     }
6270   } else { // Narrowing + Conversion
6271     if (SrcVT.isInteger()) {
6272       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
      // First do a narrowing conversion to an FP type half the size, then
      // round the result to a smaller FP type if needed.
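      // For example, an nxv2i64 -> nxv2f16 conversion first performs a
      // narrowing i64 -> f32 convert and then rounds nxv2f32 to nxv2f16.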

      MVT InterimFVT = DstVT;
      if (SrcEltSize > (2 * DstEltSize)) {
        assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
        assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
        InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
      }

      Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);

      if (InterimFVT != DstVT) {
        Src = Result;
        Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
      }
    } else {
      assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
             "Wrong input/output vector types");
      // First do a narrowing conversion to an integer half the size, then
      // truncate if needed.

      if (DstEltSize == 1) {
        // First convert to the same size integer, then convert to mask using
        // setcc.
        assert(SrcEltSize >= 16 && "Unexpected FP type!");
        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
                                          DstVT.getVectorElementCount());
        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);

        // Compare the integer result to 0. The integer should be 0 or 1/-1,
        // otherwise the conversion was undefined.
        MVT XLenVT = Subtarget.getXLenVT();
        SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
        SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
                                DAG.getUNDEF(InterimIVT), SplatZero, VL);
        Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
                             DAG.getCondCode(ISD::SETNE), Mask, VL);
      } else {
        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
                                          DstVT.getVectorElementCount());

        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);

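        // RVV truncates (vnsrl/vncvt) can only halve the element width, so
        // emit a chain of truncates, halving the element width at each step,
        // until the destination type is reached.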
        while (InterimIVT != DstVT) {
          SrcEltSize /= 2;
          Src = Result;
          InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
                                        DstVT.getVectorElementCount());
          Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
                               Src, Mask, VL);
        }
      }
    }
  }

  MVT VT = Op.getSimpleValueType();
  if (!VT.isFixedLengthVector())
    return Result;
  return convertFromScalableVector(VT, Result, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
                                            unsigned MaskOpc,
                                            unsigned VecOpc) const {
  MVT VT = Op.getSimpleValueType();
  if (VT.getVectorElementType() != MVT::i1)
    return lowerVPOp(Op, DAG, VecOpc);

  // It is safe to drop the mask parameter as masked-off elements are undef.
  SDValue Op1 = Op->getOperand(0);
  SDValue Op2 = Op->getOperand(1);
  SDValue VL = Op->getOperand(3);

  MVT ContainerVT = VT;
  const bool IsFixed = VT.isFixedLengthVector();
  if (IsFixed) {
    ContainerVT = getContainerForFixedLengthVector(VT);
    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
  }

  SDLoc DL(Op);
  SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
  if (!IsFixed)
    return Val;
  return convertFromScalableVector(VT, Val, DAG, Subtarget);
}

// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();

  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  MachineMemOperand *MMO = MemSD->getMemOperand();
  SDValue Chain = MemSD->getChain();
  SDValue BasePtr = MemSD->getBasePtr();

  ISD::LoadExtType LoadExtType;
  SDValue Index, Mask, PassThru, VL;

  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
    Index = VPGN->getIndex();
    Mask = VPGN->getMask();
    PassThru = DAG.getUNDEF(VT);
    VL = VPGN->getVectorLength();
    // VP doesn't support extending loads.
    LoadExtType = ISD::NON_EXTLOAD;
  } else {
    // Else it must be an MGATHER.
    auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
    Index = MGN->getIndex();
    Mask = MGN->getMask();
    PassThru = MGN->getPassThru();
    LoadExtType = MGN->getExtensionType();
  }

  MVT IndexVT = Index.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
  assert(LoadExtType == ISD::NON_EXTLOAD &&
         "Unexpected extending MGATHER/VP_GATHER");
  (void)LoadExtType;

  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
  // the selection of the masked intrinsics doesn't do this for us.
  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VT);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
                               ContainerVT.getVectorElementCount());

    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);

    if (!IsUnmasked) {
      MVT MaskVT = getMaskTypeFor(ContainerVT);
      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
      PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
    }
  }

  if (!VL)
    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;

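  // As noted above, indices are implicitly truncated to XLEN, so when XLEN is
  // 32 an i64 index vector can be narrowed to XLEN-bit elements without
  // changing the addressing behaviour.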
  if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
    IndexVT = IndexVT.changeVectorElementType(XLenVT);
    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
                                   VL);
    Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
                        TrueMask, VL);
  }

  unsigned IntID =
      IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
  if (IsUnmasked)
    Ops.push_back(DAG.getUNDEF(ContainerVT));
  else
    Ops.push_back(PassThru);
  Ops.push_back(BasePtr);
  Ops.push_back(Index);
  if (!IsUnmasked)
    Ops.push_back(Mask);
  Ops.push_back(VL);
  if (!IsUnmasked)
    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));

  SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
  SDValue Result =
      DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
  Chain = Result.getValue(1);

  if (VT.isFixedLengthVector())
    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);

  return DAG.getMergeValues({Result, Chain}, DL);
}

// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then
// be matched to an RVV indexed store. The RVV indexed store instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  MachineMemOperand *MMO = MemSD->getMemOperand();
  SDValue Chain = MemSD->getChain();
  SDValue BasePtr = MemSD->getBasePtr();

  bool IsTruncatingStore = false;
  SDValue Index, Mask, Val, VL;

  if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
    Index = VPSN->getIndex();
    Mask = VPSN->getMask();
    Val = VPSN->getValue();
    VL = VPSN->getVectorLength();
    // VP doesn't support truncating stores.
    IsTruncatingStore = false;
  } else {
    // Else it must be an MSCATTER.
    auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
    Index = MSN->getIndex();
    Mask = MSN->getMask();
    Val = MSN->getValue();
    IsTruncatingStore = MSN->isTruncatingStore();
  }

  MVT VT = Val.getSimpleValueType();
  MVT IndexVT = Index.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Unexpected VTs!");
  assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
  assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
  (void)IsTruncatingStore;

  // If the mask is known to be all ones, optimize to an unmasked intrinsic;
  // the selection of the masked intrinsics doesn't do this for us.
  bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VT);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
                               ContainerVT.getVectorElementCount());

    Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
    Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);

    if (!IsUnmasked) {
      MVT MaskVT = getMaskTypeFor(ContainerVT);
      Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
    }
  }

  if (!VL)
    VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;

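  // As with gathers, an i64 index vector on RV32 is narrowed to XLEN-bit
  // elements; indices are implicitly truncated to XLEN, so this does not
  // change the addressing behaviour.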
  if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
    IndexVT = IndexVT.changeVectorElementType(XLenVT);
    SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
                                   VL);
    Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
                        TrueMask, VL);
  }

  unsigned IntID =
      IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
  Ops.push_back(Val);
  Ops.push_back(BasePtr);
  Ops.push_back(Index);
  if (!IsUnmasked)
    Ops.push_back(Mask);
  Ops.push_back(VL);

  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
}

SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue SysRegNo = DAG.getTargetConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
  SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);

  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as
  // an index into a table of 4-bit fields, each holding the corresponding
  // FLT_ROUNDS mode.
  static const int Table =
      (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
      (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
      (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
      (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
      (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);

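  // The FLT_ROUNDS value is thus (Table >> (4 * RM)) & 7; the SHL by 2 below
  // scales the RISCV mode RM to the bit offset of its 4-bit field. For
  // example, RTZ (RM = 1) selects bits [7:4], which hold
  // int(RoundingMode::TowardZero).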
  SDValue Shift =
      DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                               DAG.getConstant(7, DL, XLenVT));

  return DAG.getMergeValues({Masked, Chain}, DL);
}

SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue RMValue = Op->getOperand(1);
  SDValue SysRegNo = DAG.getTargetConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);

  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table of 4-bit fields, each holding the corresponding RISCV
  // mode.
  static const unsigned Table =
      (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
      (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
      (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
      (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
      (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));

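  // The RISCV mode is thus (Table >> (4 * RMValue)) & 7, the inverse of the
  // lookup performed by lowerGET_ROUNDING above.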
  SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
                              DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                        DAG.getConstant(0x7, DL, XLenVT));
  return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
                     RMValue);
}

SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  bool isRISCV64 = Subtarget.is64Bit();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  int FI = MF.getFrameInfo().CreateFixedObject(isRISCV64 ? 8 : 4, 0, false);
  return DAG.getFrameIndex(FI, PtrVT);
}

static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
  switch (IntNo) {
  default:
    llvm_unreachable("Unexpected Intrinsic");
  case Intrinsic::riscv_bcompress:
    return RISCVISD::BCOMPRESSW;
  case Intrinsic::riscv_bdecompress:
    return RISCVISD::BDECOMPRESSW;
  case Intrinsic::riscv_bfp:
    return RISCVISD::BFPW;
  case Intrinsic::riscv_fsl:
    return RISCVISD::FSLW;
  case Intrinsic::riscv_fsr:
    return RISCVISD::FSRW;
  }
}

// Converts the given intrinsic to an i64 operation, any-extending its
// operands.
static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
                                         unsigned IntNo) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
  // Rebuild the operand list, skipping the intrinsic ID operand.
  SmallVector<SDValue, 3> NewOps;
  for (SDValue Op : drop_begin(N->ops()))
    // Promote the operand to i64 type
    NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
  // ReplaceNodeResults requires we maintain the same type for the return value.
  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  }
}

// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 aren't legal types for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W instructions later because the fact that the operation was
// originally of type i8/i16/i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
                                   unsigned ExtOpc = ISD::ANY_EXTEND) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return value.
  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics, reducing the number of sign-extension instructions needed.
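// For example, on RV64 an i32 (add x, y) becomes
//   (trunc (sext_inreg (add (any_ext x), (any_ext y)), i32)),
// a shape that isel can match to a single addw.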
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    bool IsStrict = N->isStrictFPOpcode();
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
                    N->getOpcode() == ISD::STRICT_FP_TO_SINT;
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
        TargetLowering::TypeSoftenFloat) {
      if (!isTypeLegal(Op0.getValueType()))
        return;
      if (IsStrict) {
        unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
                                : RISCVISD::STRICT_FCVT_WU_RV64;
        SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
        SDValue Res = DAG.getNode(
            Opc, DL, VTs, N->getOperand(0), Op0,
            DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
        Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
        Results.push_back(Res.getValue(1));
        return;
      }
      unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
      SDValue Res =
          DAG.getNode(Opc, DL, MVT::i64, Op0,
                      DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }
    // If the FP type needs to be softened, emit a library call using the 'si'
    // version. If we left it to default legalization we'd end up with 'di'. If
    // the FP type doesn't need to be softened just let generic type
    // legalization promote the result type.
    RTLIB::Libcall LC;
    if (IsSigned)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::MUL: {
    unsigned Size = N->getSimpleValueType(0).getSizeInBits();
    unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
    if (Size > XLen) {
      assert(Size == (XLen * 2) && "Unexpected custom legalisation");
      SDValue LHS = N->getOperand(0);
      SDValue RHS = N->getOperand(1);
      APInt HighMask = APInt::getHighBitsSet(Size, XLen);

      bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
      bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
      // We need exactly one side to be unsigned.
      if (LHSIsU == RHSIsU)
        return;

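      // MULHSU computes the high XLen bits of the product of a signed value
      // and an unsigned value, so the full 2*XLen product of S (signed) and
      // U (unsigned) can be assembled from (mul S, U) and (mulhsu S, U).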
      auto MakeMULPair = [&](SDValue S, SDValue U) {
        MVT XLenVT = Subtarget.getXLenVT();
        S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
        U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
        SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
        SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
        return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
      };

      bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
      bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;

      // The other operand should be signed, but still prefer MULH when
      // possible.
      if (RHSIsU && LHSIsS && !RHSIsS)
        Results.push_back(MakeMULPair(LHS, RHS));
      else if (LHSIsU && RHSIsS && !LHSIsS)
        Results.push_back(MakeMULPair(RHS, LHS));

      return;
    }
    LLVM_FALLTHROUGH;
  }
  case ISD::ADD:
  case ISD::SUB:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() != ISD::Constant) {
      // If we can use a BSET instruction, allow default promotion to apply.
      if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
          isOneConstant(N->getOperand(0)))
        break;
      Results.push_back(customLegalizeToWOp(N, DAG));
      break;
    }

    // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
    // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
    // shift amount.
    if (N->getOpcode() == ISD::SHL) {
      SDLoc DL(N);
      SDValue NewOp0 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
      SDValue NewOp1 =
          DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
      SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                                   DAG.getValueType(MVT::i32));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    }

    break;
  case ISD::ROTL:
  case ISD::ROTR:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    bool IsCTZ =
        N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
    unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
    SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
    return;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM: {
    MVT VT = N->getSimpleValueType(0);
    assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
           Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
           "Unexpected custom legalisation");
    // Don't promote division/remainder by a constant since we should expand
    // those to a multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;

    // If the input is i32, use ANY_EXTEND since the W instructions don't read
    // the upper 32 bits. For other types we need to sign or zero extend
    // based on the opcode.
    unsigned ExtOpc = ISD::ANY_EXTEND;
    if (VT != MVT::i32)
      ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
                                           : ISD::ZERO_EXTEND;

    Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    bool IsAdd = N->getOpcode() == ISD::UADDO;
    // Create an ADDW or SUBW.
    SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue Res =
        DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
    Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
                      DAG.getValueType(MVT::i32));

    SDValue Overflow;
    if (IsAdd && isOneConstant(RHS)) {
      // Special case: uaddo X, 1 overflowed iff the addition result is 0.
      // The general case (X + C) < C is not necessarily beneficial. Although we
      // reduce the live range of X, we may introduce the materialization of
      // constant C, especially when the setcc result is used by a branch. We
      // have no compare-with-constant-and-branch instructions.
      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
                              DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
    } else {
      // Sign extend the LHS and perform an unsigned compare with the ADDW
      // result. Since the inputs are sign extended from i32, this is equivalent
      // to comparing the lower 32 bits.
      LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
                              IsAdd ? ISD::SETULT : ISD::SETUGT);
    }

    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
    Results.push_back(Overflow);
    return;
  }
  case ISD::UADDSAT:
  case ISD::USUBSAT: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
      // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
      // sign extend allows overflow of the lower 32 bits to be detected in
      // the promoted type.
      SDValue LHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
      SDValue RHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }

    // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
    // promotion for UADDO/USUBO.
    Results.push_back(expandAddSubSat(N, DAG));
    return;
  }
  case ISD::ABS: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)

    SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));

    // Freeze the source so we can increase its use count.
    Src = DAG.getFreeze(Src);

    // Copy sign bit to all bits using the sraiw pattern.
    SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
                                   DAG.getValueType(MVT::i32));
    SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
                           DAG.getConstant(31, DL, MVT::i64));

    SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
    NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);

    // NOTE: The result is only required to be anyextended, but sext is
    // consistent with type legalization of sub.
    NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
                         DAG.getValueType(MVT::i32));
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    return;
  }
  case ISD::BITCAST: {
    EVT VT = N->getValueType(0);
    assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
    SDValue Op0 = N->getOperand(0);
    EVT Op0VT = Op0.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
    } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
               isTypeLegal(Op0VT)) {
      // Custom-legalize bitcasts from fixed-length vector types to illegal
      // scalar types in order to improve codegen. Bitcast the vector to a
      // one-element vector type whose element type is the same as the result
      // type, and extract the first element.
      EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
      if (isTypeLegal(BVT)) {
        SDValue BVec = DAG.getBitcast(BVT, Op0);
        Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
                                      DAG.getConstant(0, DL, XLenVT)));
      }
    }
    break;
  }
  case RISCVISD::GREV:
  case RISCVISD::GORC:
  case RISCVISD::SHFL: {
    MVT VT = N->getSimpleValueType(0);
    MVT XLenVT = Subtarget.getXLenVT();
    assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
           "Unexpected custom legalisation");
    assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    assert((Subtarget.hasStdExtZbp() ||
            (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
             N->getConstantOperandVal(1) == 7)) &&
           "Unexpected extension");
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
    SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
    break;
  }
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    MVT VT = N->getSimpleValueType(0);
    MVT XLenVT = Subtarget.getXLenVT();
    assert((VT == MVT::i8 || VT == MVT::i16 ||
            (VT == MVT::i32 && Subtarget.is64Bit())) &&
           Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (N->getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
    SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
                                DAG.getConstant(Imm, DL, XLenVT));
    // ReplaceNodeResults requires we maintain the same type for the return
    // value.
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
    break;
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
    SDValue NewOp0 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
    SDValue NewOp1 =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
    SDValue NewShAmt =
        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
    // FSLW/FSRW take a 6-bit shift amount but i32 FSHL/FSHR only use 5 bits.
    // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
    NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
                           DAG.getConstant(0x1f, DL, MVT::i64));
    // fshl and fshr concatenate their operands in the same order. The fsrw and
    // fslw instructions use different orders. fshl returns its first operand
    // for a shift of zero, fshr returns its second operand. fsl and fsr both
    // return rs1, so the ISD nodes need to have different operand orders. The
    // shift amount is in rs2.
    unsigned Opc = RISCVISD::FSLW;
    if (N->getOpcode() == ISD::FSHR) {
      std::swap(NewOp0, NewOp1);
      Opc = RISCVISD::FSRW;
    }
    SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
    // type is illegal (currently only vXi64 RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper- and lower- halves of the SEW-bit vector element, slid down to the
    // first element.
    SDValue Vec = N->getOperand(0);
    SDValue Idx = N->getOperand(1);

    // The vector type hasn't been legalized yet so we can't issue target
    // specific nodes if it needs legalization.
    // FIXME: We would manually legalize if it's important.
    if (!isTypeLegal(Vec.getValueType()))
      return;

    MVT VecVT = Vec.getSimpleValueType();

    assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
           VecVT.getVectorElementType() == MVT::i64 &&
           "Unexpected EXTRACT_VECTOR_ELT legalization");

    // If this is a fixed vector, we need to convert it to a scalable vector.
    MVT ContainerVT = VecVT;
    if (VecVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(VecVT);
      Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
    }

    MVT XLenVT = Subtarget.getXLenVT();

    // Use a VL of 1 to avoid processing more elements than we need.
    SDValue VL = DAG.getConstant(1, DL, XLenVT);
    SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);

    // Unless the index is known to be 0, we must slide the vector down to get
    // the desired element into index 0.
    if (!isNullConstant(Idx)) {
      Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
                        DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
    }

    // Extract the lower XLEN bits of the correct vector element.
    SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);

    // To extract the upper XLEN bits of the vector element, shift the first
    // element right by 32 bits and re-extract the lower XLEN bits.
    SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                                     DAG.getUNDEF(ContainerVT),
                                     DAG.getConstant(32, DL, XLenVT), VL);
    SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
                                 ThirtyTwoV, Mask, VL);

    SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      llvm_unreachable(
          "Don't know how to custom type legalize this intrinsic!");
    case Intrinsic::riscv_grev:
    case Intrinsic::riscv_gorc: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc =
          IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
      // If the control is a constant, promote the node by clearing any extra
      // bits in the control. isel will form greviw/gorciw if the result is
      // sign extended.
      if (isa<ConstantSDNode>(NewOp2)) {
        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                             DAG.getConstant(0x1f, DL, MVT::i64));
        Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
      }
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_bcompress:
    case Intrinsic::riscv_bdecompress:
    case Intrinsic::riscv_bfp:
    case Intrinsic::riscv_fsl:
    case Intrinsic::riscv_fsr: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
      break;
    }
    case Intrinsic::riscv_orc_b: {
      // Lower to the GORCI encoding for orc.b with the operand extended.
      SDValue NewOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
                                DAG.getConstant(7, DL, MVT::i64));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }
    case Intrinsic::riscv_shfl:
    case Intrinsic::riscv_unshfl: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc =
          IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
      // There is no (UN)SHFLIW. If the control word is a constant, we can use
      // (UN)SHFLI with bit 4 of the control word cleared. The upper 32-bit
      // half will be shuffled the same way as the lower 32-bit half, but the
      // two halves won't cross.
      if (isa<ConstantSDNode>(NewOp2)) {
        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                             DAG.getConstant(0xf, DL, MVT::i64));
        Opc =
            IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
      }
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_vmv_x_s: {
      EVT VT = N->getValueType(0);
      MVT XLenVT = Subtarget.getXLenVT();
      if (VT.bitsLT(XLenVT)) {
        // Simple case just extract using vmv.x.s and truncate.
        SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
                                      Subtarget.getXLenVT(), N->getOperand(1));
        Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
        return;
      }

      assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
             "Unexpected custom legalization");

      // We need to do the move in two steps.
      SDValue Vec = N->getOperand(1);
      MVT VecVT = Vec.getSimpleValueType();

      // First extract the lower XLEN bits of the element.
      SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);

      // To extract the upper XLEN bits of the vector element, shift the first
      // element right by 32 bits and re-extract the lower XLEN bits.
      SDValue VL = DAG.getConstant(1, DL, XLenVT);
      SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);

      SDValue ThirtyTwoV =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
                      DAG.getConstant(32, DL, XLenVT), VL);
      SDValue LShr32 =
          DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
      SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);

      Results.push_back(
          DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
      break;
    }
    }
    break;
  }
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_SMIN:
  case ISD::VECREDUCE_UMIN:
    if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
      Results.push_back(V);
    break;
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_UMIN:
    if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
      Results.push_back(V);
    break;
  case ISD::FLT_ROUNDS_: {
    SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
    SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    break;
  }
  }
}

// A structure to hold one of the bit-manipulation patterns below. Together, a
// SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
//   (or (and (shl x, 1), 0xAAAAAAAA),
//       (and (srl x, 1), 0x55555555))
struct RISCVBitmanipPat {
  SDValue Op;
  unsigned ShAmt;
  bool IsSHL;

  bool formsPairWith(const RISCVBitmanipPat &Other) const {
    return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
  }
};

// Matches patterns of the form
//   (and (shl x, C2), (C1 << C2))
//   (and (srl x, C2), C1)
//   (shl (and x, C1), C2)
//   (srl (and x, (C1 << C2)), C2)
// Where C2 is a power of 2 and C1 has at least that many leading zeroes.
// The expected masks for each shift amount are specified in BitmanipMasks where
// BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
// possible XLen is 64.
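// For example, (and (srl x, 1), 0x55555555) matches with C2 = 1 and
// C1 = BitmanipMasks[0] = 0x55555555, yielding {x, ShAmt = 1, IsSHL = false}.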
static Optional<RISCVBitmanipPat>
matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
  assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
         "Unexpected number of masks");
  Optional<uint64_t> Mask;
  // Optionally consume a mask around the shift operation.
  if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
    Mask = Op.getConstantOperandVal(1);
    Op = Op.getOperand(0);
  }
  if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
    return None;
  bool IsSHL = Op.getOpcode() == ISD::SHL;

  if (!isa<ConstantSDNode>(Op.getOperand(1)))
    return None;
  uint64_t ShAmt = Op.getConstantOperandVal(1);

  unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
    return None;
  // If we don't have enough masks for 64 bit, then we must be trying to
  // match SHFL so we're only allowed to shift 1/4 of the width.
  if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
    return None;

  SDValue Src = Op.getOperand(0);

  // The expected mask is shifted left when the AND is found around SHL
  // patterns.
  //   ((x >> 1) & 0x55555555)
  //   ((x << 1) & 0xAAAAAAAA)
  bool SHLExpMask = IsSHL;

  if (!Mask) {
    // Sometimes LLVM keeps the mask as an operand of the shift, typically when
    // the mask is all ones: consume that now.
    if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
      Mask = Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
      // The expected mask is now in fact shifted left for SRL, so reverse the
      // decision.
      //   ((x & 0xAAAAAAAA) >> 1)
      //   ((x & 0x55555555) << 1)
      SHLExpMask = !SHLExpMask;
    } else {
      // Use a default shifted mask of all-ones if there's no AND, truncated
      // down to the expected width. This simplifies the logic later on.
      Mask = maskTrailingOnes<uint64_t>(Width);
      *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
    }
  }

  unsigned MaskIdx = Log2_32(ShAmt);
  uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);

  if (SHLExpMask)
    ExpMask <<= ShAmt;

  if (Mask != ExpMask)
    return None;

  return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
}

// Matches any of the following bit-manipulation patterns:
//   (and (shl x, 1), (0x55555555 << 1))
//   (and (srl x, 1), 0x55555555)
//   (shl (and x, 0x55555555), 1)
//   (srl (and x, (0x55555555 << 1)), 1)
// where the shift amount and mask may vary thus:
//   [1]  = 0x55555555 / 0xAAAAAAAA
//   [2]  = 0x33333333 / 0xCCCCCCCC
//   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
//   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
//   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
  // These are the unshifted masks which we use to match bit-manipulation
  // patterns. They may be shifted left in certain circumstances.
  static const uint64_t BitmanipMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  return matchRISCVBitmanipPat(Op, BitmanipMasks);
}

// Try to fold (<bop> x, (reduction.<bop> vec, start))
static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
  auto BinOpToRVVReduce = [](unsigned Opc) {
    switch (Opc) {
    default:
      llvm_unreachable("Unhandled binary op to transform to a reduction");
    case ISD::ADD:
      return RISCVISD::VECREDUCE_ADD_VL;
    case ISD::UMAX:
      return RISCVISD::VECREDUCE_UMAX_VL;
    case ISD::SMAX:
      return RISCVISD::VECREDUCE_SMAX_VL;
    case ISD::UMIN:
      return RISCVISD::VECREDUCE_UMIN_VL;
    case ISD::SMIN:
      return RISCVISD::VECREDUCE_SMIN_VL;
    case ISD::AND:
      return RISCVISD::VECREDUCE_AND_VL;
    case ISD::OR:
      return RISCVISD::VECREDUCE_OR_VL;
    case ISD::XOR:
      return RISCVISD::VECREDUCE_XOR_VL;
    case ISD::FADD:
      return RISCVISD::VECREDUCE_FADD_VL;
    case ISD::FMAXNUM:
      return RISCVISD::VECREDUCE_FMAX_VL;
    case ISD::FMINNUM:
      return RISCVISD::VECREDUCE_FMIN_VL;
    }
  };

  auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
    return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
           isNullConstant(V.getOperand(1)) &&
           V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
  };

  unsigned Opc = N->getOpcode();
  unsigned ReduceIdx;
  if (IsReduction(N->getOperand(0), Opc))
    ReduceIdx = 0;
  else if (IsReduction(N->getOperand(1), Opc))
    ReduceIdx = 1;
  else
    return SDValue();

  // Skip if FADD disallows reassociation but the combiner needs it.
  if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
    return SDValue();

  SDValue Extract = N->getOperand(ReduceIdx);
  SDValue Reduce = Extract.getOperand(0);
  if (!Reduce.hasOneUse())
    return SDValue();

  SDValue ScalarV = Reduce.getOperand(2);

  // Make sure that ScalarV is a splat with VL=1.
  if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
      ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
      ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
    return SDValue();

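  // Operand 2 of the splat node is its VL. The reduction only uses element 0
  // of the scalar vector as its start value, so require VL=1.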
  if (!isOneConstant(ScalarV.getOperand(2)))
    return SDValue();

  // TODO: Deal with values other than the neutral element.
  auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
    if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
        isNullFPConstant(V))
      return true;
    return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
                                 N->getFlags()) == V;
  };

  // Check that the scalar operand of ScalarV is the neutral element.
  if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
    return SDValue();

  if (!ScalarV.hasOneUse())
    return SDValue();

  EVT SplatVT = ScalarV.getValueType();
  SDValue NewStart = N->getOperand(1 - ReduceIdx);
  unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
  if (SplatVT.isInteger()) {
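    // A non-zero constant in the simm5 range can be splatted with a single
    // vmv.v.i, so use VMV_V_X_VL for it; any other start value goes through
    // VMV_S_X_VL (vmv.s.x).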
    auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
    if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
      SplatOpc = RISCVISD::VMV_S_X_VL;
    else
      SplatOpc = RISCVISD::VMV_V_X_VL;
  }

  SDValue NewScalarV =
      DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
                  ScalarV.getOperand(2));
  SDValue NewReduce =
      DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
                  Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
                  Reduce.getOperand(3), Reduce.getOperand(4));
  return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
                     Extract.getValueType(), NewReduce, Extract.getOperand(1));
}

// Match the following pattern as a GREVI(W) operation
//   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
  EVT VT = Op.getValueType();

  if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
    auto LHS = matchGREVIPat(Op.getOperand(0));
    auto RHS = matchGREVIPat(Op.getOperand(1));
    if (LHS && RHS && LHS->formsPairWith(*RHS)) {
      SDLoc DL(Op);
      return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
                         DAG.getConstant(LHS->ShAmt, DL, VT));
    }
  }
  return SDValue();
}

// Matches any of the following patterns as a GORCI(W) operation
7525 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7526 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7527 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7528 // Note that with the variant of 3.,
7529 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7530 // the inner pattern will first be matched as GREVI and then the outer
7531 // pattern will be matched to GORC via the first rule above.
7532 // 4.  (or (rotl/rotr x, bitwidth/2), x)
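// For example, on RV32 (or (rotl x, 16), x) matches rule 4 and becomes
// (GORC x, 16), since a rotate by half the bitwidth is the "swap halves"
// GREV stage.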
7533 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7534                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7536   EVT VT = Op.getValueType();
7537 
7538   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7539     SDLoc DL(Op);
7540     SDValue Op0 = Op.getOperand(0);
7541     SDValue Op1 = Op.getOperand(1);
7542 
7543     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7544       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7545           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7546           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7547         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7548       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7549       if ((Reverse.getOpcode() == ISD::ROTL ||
7550            Reverse.getOpcode() == ISD::ROTR) &&
7551           Reverse.getOperand(0) == X &&
7552           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7553         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7554         if (RotAmt == (VT.getSizeInBits() / 2))
7555           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7556                              DAG.getConstant(RotAmt, DL, VT));
7557       }
7558       return SDValue();
7559     };
7560 
7561     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7562     if (SDValue V = MatchOROfReverse(Op0, Op1))
7563       return V;
7564     if (SDValue V = MatchOROfReverse(Op1, Op0))
7565       return V;
7566 
7567     // OR is commutable so canonicalize its OR operand to the left
7568     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7569       std::swap(Op0, Op1);
7570     if (Op0.getOpcode() != ISD::OR)
7571       return SDValue();
7572     SDValue OrOp0 = Op0.getOperand(0);
7573     SDValue OrOp1 = Op0.getOperand(1);
7574     auto LHS = matchGREVIPat(OrOp0);
7575     // OR is commutable so swap the operands and try again: x might have been
7576     // on the left
7577     if (!LHS) {
7578       std::swap(OrOp0, OrOp1);
7579       LHS = matchGREVIPat(OrOp0);
7580     }
7581     auto RHS = matchGREVIPat(Op1);
7582     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7583       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7584                          DAG.getConstant(LHS->ShAmt, DL, VT));
7585     }
7586   }
7587   return SDValue();
7588 }
7589 
7590 // Matches any of the following bit-manipulation patterns:
7591 //   (and (shl x, 1), (0x22222222 << 1))
7592 //   (and (srl x, 1), 0x22222222)
7593 //   (shl (and x, 0x22222222), 1)
7594 //   (srl (and x, (0x22222222 << 1)), 1)
7595 // where the shift amount and mask may vary thus:
7596 //   [1]  = 0x22222222 / 0x44444444
7597 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7598 //   [4]  = 0x00F000F0 / 0x0F000F00
7599 //   [8]  = 0x0000FF00 / 0x00FF0000
7600 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
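// For example, with the [8] row on RV32, (and (srl x, 8), 0x0000FF00) matches
// with ShAmt = 8. (Illustrative; matchRISCVBitmanipPat defines the exact
// matching rules.)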
7601 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7602   // These are the unshifted masks which we use to match bit-manipulation
7603   // patterns. They may be shifted left in certain circumstances.
7604   static const uint64_t BitmanipMasks[] = {
7605       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7606       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7607 
7608   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7609 }
7610 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
7612 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7613                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7615   EVT VT = Op.getValueType();
7616 
7617   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7618     return SDValue();
7619 
7620   SDValue Op0 = Op.getOperand(0);
7621   SDValue Op1 = Op.getOperand(1);
7622 
  // OR is commutable, so canonicalize the inner OR to the LHS.
7624   if (Op0.getOpcode() != ISD::OR)
7625     std::swap(Op0, Op1);
7626   if (Op0.getOpcode() != ISD::OR)
7627     return SDValue();
7628 
7629   // We found an inner OR, so our operands are the operands of the inner OR
7630   // and the other operand of the outer OR.
7631   SDValue A = Op0.getOperand(0);
7632   SDValue B = Op0.getOperand(1);
7633   SDValue C = Op1;
7634 
7635   auto Match1 = matchSHFLPat(A);
7636   auto Match2 = matchSHFLPat(B);
7637 
7638   // If neither matched, we failed.
7639   if (!Match1 && !Match2)
7640     return SDValue();
7641 
  // We had at least one match. If one failed, try the remaining C operand.
7643   if (!Match1) {
7644     std::swap(A, C);
7645     Match1 = matchSHFLPat(A);
7646     if (!Match1)
7647       return SDValue();
7648   } else if (!Match2) {
7649     std::swap(B, C);
7650     Match2 = matchSHFLPat(B);
7651     if (!Match2)
7652       return SDValue();
7653   }
7654   assert(Match1 && Match2);
7655 
7656   // Make sure our matches pair up.
7657   if (!Match1->formsPairWith(*Match2))
7658     return SDValue();
7659 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
7662   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7663       C.getOperand(0) != Match1->Op)
7664     return SDValue();
7665 
7666   uint64_t Mask = C.getConstantOperandVal(1);
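  // The expected mask is the set of bits a SHFL stage leaves in place, i.e.
  // the complement of the two shifted masks from matchSHFLPat; e.g. for the
  // [1] row, ~(0x22222222 | 0x44444444) == 0x99999999 per 32 bits.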
7667 
7668   static const uint64_t BitmanipMasks[] = {
7669       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7670       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7671   };
7672 
7673   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7674   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7675   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7676 
7677   if (Mask != ExpMask)
7678     return SDValue();
7679 
7680   SDLoc DL(Op);
7681   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7682                      DAG.getConstant(Match1->ShAmt, DL, VT));
7683 }
7684 
7685 // Optimize (add (shl x, c0), (shl y, c1)) ->
//          (SLLI (SH*ADD x, y), c0), if c1-c0 equals 1, 2, or 3.
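// For example, (add (shl x, 5), (shl y, 7)) has c1-c0 == 2, so it becomes
// (shl (add (shl y, 2), x), 5), which is expected to select as
// "sh2add y, x" followed by "slli 5".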
7687 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7688                                   const RISCVSubtarget &Subtarget) {
  // Perform this optimization only if the Zba extension is enabled.
7690   if (!Subtarget.hasStdExtZba())
7691     return SDValue();
7692 
7693   // Skip for vector types and larger types.
7694   EVT VT = N->getValueType(0);
7695   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7696     return SDValue();
7697 
7698   // The two operand nodes must be SHL and have no other use.
7699   SDValue N0 = N->getOperand(0);
7700   SDValue N1 = N->getOperand(1);
7701   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7702       !N0->hasOneUse() || !N1->hasOneUse())
7703     return SDValue();
7704 
7705   // Check c0 and c1.
7706   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7707   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7708   if (!N0C || !N1C)
7709     return SDValue();
7710   int64_t C0 = N0C->getSExtValue();
7711   int64_t C1 = N1C->getSExtValue();
7712   if (C0 <= 0 || C1 <= 0)
7713     return SDValue();
7714 
7715   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7716   int64_t Bits = std::min(C0, C1);
7717   int64_t Diff = std::abs(C0 - C1);
7718   if (Diff != 1 && Diff != 2 && Diff != 3)
7719     return SDValue();
7720 
7721   // Build nodes.
7722   SDLoc DL(N);
7723   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7724   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7725   SDValue NA0 =
7726       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7727   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7728   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7729 }
7730 
7731 // Combine
7732 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7733 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7734 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7735 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7736 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7737 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
// The grev patterns represent BSWAP.
// FIXME: This can be generalized to any GREV. We just need to toggle the MSB
// of the grev shift amount.
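// Sketch of why this works: a rotate by half the bitwidth is the GREV stage
// with the MSB of the shift amount set, and GREV shift amounts compose by
// XOR, e.g. for RV32: 24 ^ 16 == 8.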
7741 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7742                                           const RISCVSubtarget &Subtarget) {
7743   bool IsWInstruction =
7744       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7745   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7746           IsWInstruction) &&
7747          "Unexpected opcode!");
7748   SDValue Src = N->getOperand(0);
7749   EVT VT = N->getValueType(0);
7750   SDLoc DL(N);
7751 
7752   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7753     return SDValue();
7754 
7755   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7756       !isa<ConstantSDNode>(Src.getOperand(1)))
7757     return SDValue();
7758 
7759   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7760   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7761 
  // The rotate must be by half the bitwidth for ROTR/ROTL, or by 16 for
  // RORW/ROLW, and the grev must be the bswap encoding for this width.
7764   unsigned ShAmt1 = N->getConstantOperandVal(1);
7765   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7766   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7767     return SDValue();
7768 
7769   Src = Src.getOperand(0);
7770 
  // Toggle the MSB of the shift amount.
7772   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7773   if (CombinedShAmt == 0)
7774     return Src;
7775 
7776   SDValue Res = DAG.getNode(
7777       RISCVISD::GREV, DL, VT, Src,
7778       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7779   if (!IsWInstruction)
7780     return Res;
7781 
7782   // Sign extend the result to match the behavior of the rotate. This will be
7783   // selected to GREVIW in isel.
7784   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7785                      DAG.getValueType(MVT::i32));
7786 }
7787 
7788 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7789 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated GORCI
// stages do not undo each other; they are merely redundant.
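// For example, (GREVI (GREVI x, 24), 8) -> (GREVI x, 16) since 24 ^ 8 == 16,
// and (GORCI (GORCI x, 1), 2) -> (GORCI x, 3) since 1 | 2 == 3.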
7792 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7793   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7794   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7795   SDValue Src = N->getOperand(0);
7796 
7797   if (Src.getOpcode() != N->getOpcode())
7798     return SDValue();
7799 
7800   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7801       !isa<ConstantSDNode>(Src.getOperand(1)))
7802     return SDValue();
7803 
7804   unsigned ShAmt1 = N->getConstantOperandVal(1);
7805   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7806   Src = Src.getOperand(0);
7807 
7808   unsigned CombinedShAmt;
7809   if (IsGORC)
7810     CombinedShAmt = ShAmt1 | ShAmt2;
7811   else
7812     CombinedShAmt = ShAmt1 ^ ShAmt2;
7813 
7814   if (CombinedShAmt == 0)
7815     return Src;
7816 
7817   SDLoc DL(N);
7818   return DAG.getNode(
7819       N->getOpcode(), DL, N->getValueType(0), Src,
7820       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7821 }
7822 
7823 // Combine a constant select operand into its use:
7824 //
7825 // (and (select cond, -1, c), x)
7826 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7827 // (or  (select cond, 0, c), x)
7828 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7829 // (xor (select cond, 0, c), x)
7830 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7831 // (add (select cond, 0, c), x)
7832 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7833 // (sub x, (select cond, 0, c))
7834 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7835 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7836                                    SelectionDAG &DAG, bool AllOnes) {
7837   EVT VT = N->getValueType(0);
7838 
7839   // Skip vectors.
7840   if (VT.isVector())
7841     return SDValue();
7842 
7843   if ((Slct.getOpcode() != ISD::SELECT &&
7844        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7845       !Slct.hasOneUse())
7846     return SDValue();
7847 
7848   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7849     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7850   };
7851 
7852   bool SwapSelectOps;
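  // ISD::SELECT is (cond, tval, fval) while RISCVISD::SELECT_CC is
  // (lhs, rhs, cc, tval, fval), hence the operand offset of 2 below.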
7853   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7854   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7855   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7856   SDValue NonConstantVal;
7857   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7858     SwapSelectOps = false;
7859     NonConstantVal = FalseVal;
7860   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7861     SwapSelectOps = true;
7862     NonConstantVal = TrueVal;
7863   } else
7864     return SDValue();
7865 
  // Slct is now known to be the desired identity constant when CC is true.
7867   TrueVal = OtherOp;
7868   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7869   // Unless SwapSelectOps says the condition should be false.
7870   if (SwapSelectOps)
7871     std::swap(TrueVal, FalseVal);
7872 
7873   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7874     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7875                        {Slct.getOperand(0), Slct.getOperand(1),
7876                         Slct.getOperand(2), TrueVal, FalseVal});
7877 
7878   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7879                      {Slct.getOperand(0), TrueVal, FalseVal});
7880 }
7881 
7882 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7883 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7884                                               bool AllOnes) {
7885   SDValue N0 = N->getOperand(0);
7886   SDValue N1 = N->getOperand(1);
7887   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7888     return Result;
7889   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7890     return Result;
7891   return SDValue();
7892 }
7893 
7894 // Transform (add (mul x, c0), c1) ->
7895 //           (add (mul (add x, c1/c0), c0), c1%c0).
7896 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7897 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7898 // to an infinite loop in DAGCombine if transformed.
7899 // Or transform (add (mul x, c0), c1) ->
7900 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7901 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7902 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7903 // lead to an infinite loop in DAGCombine if transformed.
7904 // Or transform (add (mul x, c0), c1) ->
7905 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7906 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7907 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7908 // lead to an infinite loop in DAGCombine if transformed.
7909 // Or transform (add (mul x, c0), c1) ->
7910 //              (mul (add x, c1/c0), c0).
7911 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
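// For example, (add (mul x, 1000), 2000000): 2000000 is not simm12, but
// 2000000/1000 == 2000 and 2000000%1000 == 0 both are, so this effectively
// becomes (mul (add x, 2000), 1000), folding the addend into a single ADDI.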
7912 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7913                                      const RISCVSubtarget &Subtarget) {
7914   // Skip for vector types and larger types.
7915   EVT VT = N->getValueType(0);
7916   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7917     return SDValue();
  // The first operand node must be a MUL and have no other use.
7919   SDValue N0 = N->getOperand(0);
7920   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7921     return SDValue();
  // Check if c0 and c1 match the conditions above.
7923   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7924   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7925   if (!N0C || !N1C)
7926     return SDValue();
7927   // If N0C has multiple uses it's possible one of the cases in
7928   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7929   // in an infinite loop.
7930   if (!N0C->hasOneUse())
7931     return SDValue();
7932   int64_t C0 = N0C->getSExtValue();
7933   int64_t C1 = N1C->getSExtValue();
7934   int64_t CA, CB;
7935   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7936     return SDValue();
  // Search for a proper CA (non-zero) and CB such that both are simm12.
7938   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7939       !isInt<12>(C0 * (C1 / C0))) {
7940     CA = C1 / C0;
7941     CB = C1 % C0;
7942   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7943              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7944     CA = C1 / C0 + 1;
7945     CB = C1 % C0 - C0;
7946   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7947              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7948     CA = C1 / C0 - 1;
7949     CB = C1 % C0 + C0;
7950   } else
7951     return SDValue();
7952   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7953   SDLoc DL(N);
7954   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7955                              DAG.getConstant(CA, DL, VT));
7956   SDValue New1 =
7957       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7958   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7959 }
7960 
7961 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7962                                  const RISCVSubtarget &Subtarget) {
7963   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7964     return V;
7965   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7966     return V;
7967   if (SDValue V = combineBinOpToReduce(N, DAG))
7968     return V;
7969   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7970   //      (select lhs, rhs, cc, x, (add x, y))
7971   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7972 }
7973 
7974 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7975   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7976   //      (select lhs, rhs, cc, x, (sub x, y))
7977   SDValue N0 = N->getOperand(0);
7978   SDValue N1 = N->getOperand(1);
7979   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7980 }
7981 
7982 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
7983                                  const RISCVSubtarget &Subtarget) {
7984   SDValue N0 = N->getOperand(0);
7985   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
7986   // extending X. This is safe since we only need the LSB after the shift and
7987   // shift amounts larger than 31 would produce poison. If we wait until
7988   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
7989   // to use a BEXT instruction.
7990   if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
7991       N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
7992       N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
7993       N0.hasOneUse()) {
7994     SDLoc DL(N);
7995     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
7996     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
7997     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
7998     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
7999                               DAG.getConstant(1, DL, MVT::i64));
8000     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
8001   }
8002 
8003   if (SDValue V = combineBinOpToReduce(N, DAG))
8004     return V;
8005 
8006   // fold (and (select lhs, rhs, cc, -1, y), x) ->
8007   //      (select lhs, rhs, cc, x, (and x, y))
8008   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
8009 }
8010 
8011 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
8012                                 const RISCVSubtarget &Subtarget) {
8013   if (Subtarget.hasStdExtZbp()) {
8014     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
8015       return GREV;
8016     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
8017       return GORC;
8018     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
8019       return SHFL;
8020   }
8021 
8022   if (SDValue V = combineBinOpToReduce(N, DAG))
8023     return V;
8024   // fold (or (select cond, 0, y), x) ->
8025   //      (select cond, x, (or x, y))
8026   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8027 }
8028 
8029 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
8030   SDValue N0 = N->getOperand(0);
8031   SDValue N1 = N->getOperand(1);
8032 
8033   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
8034   // NOTE: Assumes ROL being legal means ROLW is legal.
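  // (xor (sllw 1, x), -1) is all-ones with bit (x mod 32) clear, which is
  // the constant ~1 rotated left by x.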
8035   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8036   if (N0.getOpcode() == RISCVISD::SLLW &&
8037       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
8038       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
8039     SDLoc DL(N);
8040     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
8041                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
8042   }
8043 
8044   if (SDValue V = combineBinOpToReduce(N, DAG))
8045     return V;
8046   // fold (xor (select cond, 0, y), x) ->
8047   //      (select cond, x, (xor x, y))
8048   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8049 }
8050 
8051 static SDValue
8052 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
8053                                 const RISCVSubtarget &Subtarget) {
8054   SDValue Src = N->getOperand(0);
8055   EVT VT = N->getValueType(0);
8056 
8057   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8058   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8059       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8060     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8061                        Src.getOperand(0));
8062 
8063   // Fold (i64 (sext_inreg (abs X), i32)) ->
8064   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8065   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8066   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8067   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8068   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8069   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8070   // may get combined into an earlier operation so we need to use
8071   // ComputeNumSignBits.
8072   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8073   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8074   // we can't assume that X has 33 sign bits. We must check.
8075   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8076       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8077       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8078       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8079     SDLoc DL(N);
8080     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8081     SDValue Neg =
8082         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8083     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8084                       DAG.getValueType(MVT::i32));
8085     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8086   }
8087 
8088   return SDValue();
8089 }
8090 
8091 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8092 // vwadd(u).vv/vx or vwsub(u).vv/vx.
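// For example, if the RHS is (vsext_vl y) with matching mask and VL,
// (add_vl x, (vsext_vl y)) becomes (vwadd_w_vl x, y), i.e. vwadd.wv.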
8093 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8094                                              bool Commute = false) {
8095   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8096           N->getOpcode() == RISCVISD::SUB_VL) &&
8097          "Unexpected opcode");
8098   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8099   SDValue Op0 = N->getOperand(0);
8100   SDValue Op1 = N->getOperand(1);
8101   if (Commute)
8102     std::swap(Op0, Op1);
8103 
8104   MVT VT = N->getSimpleValueType(0);
8105 
8106   // Determine the narrow size for a widening add/sub.
8107   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8108   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8109                                   VT.getVectorElementCount());
8110 
8111   SDValue Mask = N->getOperand(2);
8112   SDValue VL = N->getOperand(3);
8113 
8114   SDLoc DL(N);
8115 
8116   // If the RHS is a sext or zext, we can form a widening op.
8117   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8118        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8119       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8120     unsigned ExtOpc = Op1.getOpcode();
8121     Op1 = Op1.getOperand(0);
8122     // Re-introduce narrower extends if needed.
8123     if (Op1.getValueType() != NarrowVT)
8124       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8125 
8126     unsigned WOpc;
8127     if (ExtOpc == RISCVISD::VSEXT_VL)
8128       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8129     else
8130       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8131 
8132     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8133   }
8134 
8135   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8136   // sext/zext?
8137 
8138   return SDValue();
8139 }
8140 
8141 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8142 // vwsub(u).vv/vx.
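// For example, (vwadd_w_vl (vsext_vl x), y) with matching mask and VL
// becomes (vwadd_vl x, y), i.e. vwadd.vv on the narrower element type.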
8143 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8144   SDValue Op0 = N->getOperand(0);
8145   SDValue Op1 = N->getOperand(1);
8146   SDValue Mask = N->getOperand(2);
8147   SDValue VL = N->getOperand(3);
8148 
8149   MVT VT = N->getSimpleValueType(0);
8150   MVT NarrowVT = Op1.getSimpleValueType();
8151   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8152 
8153   unsigned VOpc;
8154   switch (N->getOpcode()) {
8155   default: llvm_unreachable("Unexpected opcode");
8156   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8157   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8158   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8159   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8160   }
8161 
8162   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8163                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8164 
8165   SDLoc DL(N);
8166 
8167   // If the LHS is a sext or zext, we can narrow this op to the same size as
8168   // the RHS.
8169   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8170        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8171       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8172     unsigned ExtOpc = Op0.getOpcode();
8173     Op0 = Op0.getOperand(0);
8174     // Re-introduce narrower extends if needed.
8175     if (Op0.getValueType() != NarrowVT)
8176       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8177     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8178   }
8179 
8180   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8181                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8182 
8183   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8184   // to commute and use a vwadd(u).vx instead.
8185   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8186       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8187     Op0 = Op0.getOperand(1);
8188 
    // See if we have enough sign bits or zero bits in the scalar to use a
8190     // widening add/sub by splatting to smaller element size.
8191     unsigned EltBits = VT.getScalarSizeInBits();
8192     unsigned ScalarBits = Op0.getValueSizeInBits();
8193     // Make sure we're getting all element bits from the scalar register.
8194     // FIXME: Support implicit sign extension of vmv.v.x?
8195     if (ScalarBits < EltBits)
8196       return SDValue();
8197 
8198     if (IsSigned) {
8199       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8200         return SDValue();
8201     } else {
8202       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8203       if (!DAG.MaskedValueIsZero(Op0, Mask))
8204         return SDValue();
8205     }
8206 
8207     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8208                       DAG.getUNDEF(NarrowVT), Op0, VL);
8209     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8210   }
8211 
8212   return SDValue();
8213 }
8214 
8215 // Try to form VWMUL, VWMULU or VWMULSU.
8216 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
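// For example, (mul_vl (vsext_vl x), (vsext_vl y)) becomes (vwmul_vl x, y);
// a vsext on one side and a vzext on the other forms vwmulsu instead.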
8217 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8218                                        bool Commute) {
8219   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8220   SDValue Op0 = N->getOperand(0);
8221   SDValue Op1 = N->getOperand(1);
8222   if (Commute)
8223     std::swap(Op0, Op1);
8224 
8225   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8226   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8227   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8228   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8229     return SDValue();
8230 
8231   SDValue Mask = N->getOperand(2);
8232   SDValue VL = N->getOperand(3);
8233 
8234   // Make sure the mask and VL match.
8235   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8236     return SDValue();
8237 
8238   MVT VT = N->getSimpleValueType(0);
8239 
8240   // Determine the narrow size for a widening multiply.
8241   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8242   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8243                                   VT.getVectorElementCount());
8244 
8245   SDLoc DL(N);
8246 
8247   // See if the other operand is the same opcode.
8248   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8249     if (!Op1.hasOneUse())
8250       return SDValue();
8251 
8252     // Make sure the mask and VL match.
8253     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8254       return SDValue();
8255 
8256     Op1 = Op1.getOperand(0);
8257   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8258     // The operand is a splat of a scalar.
8259 
    // The passthru must be undef for the tail agnostic policy.
8261     if (!Op1.getOperand(0).isUndef())
8262       return SDValue();
8263     // The VL must be the same.
8264     if (Op1.getOperand(2) != VL)
8265       return SDValue();
8266 
8267     // Get the scalar value.
8268     Op1 = Op1.getOperand(1);
8269 
    // See if we have enough sign bits or zero bits in the scalar to use a
8271     // widening multiply by splatting to smaller element size.
8272     unsigned EltBits = VT.getScalarSizeInBits();
8273     unsigned ScalarBits = Op1.getValueSizeInBits();
8274     // Make sure we're getting all element bits from the scalar register.
8275     // FIXME: Support implicit sign extension of vmv.v.x?
8276     if (ScalarBits < EltBits)
8277       return SDValue();
8278 
8279     // If the LHS is a sign extend, try to use vwmul.
8280     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8281       // Can use vwmul.
8282     } else {
8283       // Otherwise try to use vwmulu or vwmulsu.
8284       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8285       if (DAG.MaskedValueIsZero(Op1, Mask))
8286         IsVWMULSU = IsSignExt;
8287       else
8288         return SDValue();
8289     }
8290 
8291     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8292                       DAG.getUNDEF(NarrowVT), Op1, VL);
8293   } else
8294     return SDValue();
8295 
8296   Op0 = Op0.getOperand(0);
8297 
8298   // Re-introduce narrower extends if needed.
8299   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8300   if (Op0.getValueType() != NarrowVT)
8301     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
  // vwmulsu requires the second operand to be zero extended.
8303   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8304   if (Op1.getValueType() != NarrowVT)
8305     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8306 
8307   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8308   if (!IsVWMULSU)
8309     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8310   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8311 }
8312 
8313 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8314   switch (Op.getOpcode()) {
8315   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8316   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8317   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8318   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8319   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8320   }
8321 
8322   return RISCVFPRndMode::Invalid;
8323 }
8324 
8325 // Fold
8326 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8327 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8328 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8329 //   (fp_to_int (fceil X))      -> fcvt X, rup
8330 //   (fp_to_int (fround X))     -> fcvt X, rmm
8331 static SDValue performFP_TO_INTCombine(SDNode *N,
8332                                        TargetLowering::DAGCombinerInfo &DCI,
8333                                        const RISCVSubtarget &Subtarget) {
8334   SelectionDAG &DAG = DCI.DAG;
8335   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8336   MVT XLenVT = Subtarget.getXLenVT();
8337 
8338   // Only handle XLen or i32 types. Other types narrower than XLen will
8339   // eventually be legalized to XLenVT.
8340   EVT VT = N->getValueType(0);
8341   if (VT != MVT::i32 && VT != XLenVT)
8342     return SDValue();
8343 
8344   SDValue Src = N->getOperand(0);
8345 
8346   // Ensure the FP type is also legal.
8347   if (!TLI.isTypeLegal(Src.getValueType()))
8348     return SDValue();
8349 
8350   // Don't do this for f16 with Zfhmin and not Zfh.
8351   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8352     return SDValue();
8353 
8354   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8355   if (FRM == RISCVFPRndMode::Invalid)
8356     return SDValue();
8357 
8358   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8359 
8360   unsigned Opc;
8361   if (VT == XLenVT)
8362     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8363   else
8364     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8365 
8366   SDLoc DL(N);
8367   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8368                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8369   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8370 }
8371 
8372 // Fold
8373 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8374 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8375 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8376 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8377 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8378 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8379                                        TargetLowering::DAGCombinerInfo &DCI,
8380                                        const RISCVSubtarget &Subtarget) {
8381   SelectionDAG &DAG = DCI.DAG;
8382   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8383   MVT XLenVT = Subtarget.getXLenVT();
8384 
8385   // Only handle XLen types. Other types narrower than XLen will eventually be
8386   // legalized to XLenVT.
8387   EVT DstVT = N->getValueType(0);
8388   if (DstVT != XLenVT)
8389     return SDValue();
8390 
8391   SDValue Src = N->getOperand(0);
8392 
8393   // Ensure the FP type is also legal.
8394   if (!TLI.isTypeLegal(Src.getValueType()))
8395     return SDValue();
8396 
8397   // Don't do this for f16 with Zfhmin and not Zfh.
8398   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8399     return SDValue();
8400 
8401   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8402 
8403   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8404   if (FRM == RISCVFPRndMode::Invalid)
8405     return SDValue();
8406 
8407   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8408 
8409   unsigned Opc;
8410   if (SatVT == DstVT)
8411     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8412   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8413     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8414   else
8415     return SDValue();
8416   // FIXME: Support other SatVTs by clamping before or after the conversion.
8417 
8418   Src = Src.getOperand(0);
8419 
8420   SDLoc DL(N);
8421   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8422                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8423 
8424   // RISCV FP-to-int conversions saturate to the destination register size, but
8425   // don't produce 0 for nan.
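  // Src != Src (SETUO) is true exactly when Src is NaN, so select 0 then.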
8426   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8427   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8428 }
8429 
8430 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8431 // smaller than XLenVT.
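// This holds because bitreverse == (bswap . brev8): reversing all bits is
// reversing the bits within each byte and then the bytes themselves, so
// (bitreverse (bswap X)) reduces to (brev8 X), i.e. GREV with shamt 7.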
8432 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8433                                         const RISCVSubtarget &Subtarget) {
8434   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8435 
8436   SDValue Src = N->getOperand(0);
8437   if (Src.getOpcode() != ISD::BSWAP)
8438     return SDValue();
8439 
8440   EVT VT = N->getValueType(0);
8441   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8442       !isPowerOf2_32(VT.getSizeInBits()))
8443     return SDValue();
8444 
8445   SDLoc DL(N);
8446   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8447                      DAG.getConstant(7, DL, VT));
8448 }
8449 
8450 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8451                                                DAGCombinerInfo &DCI) const {
8452   SelectionDAG &DAG = DCI.DAG;
8453 
8454   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8455   // bits are demanded. N will be added to the Worklist if it was not deleted.
8456   // Caller should return SDValue(N, 0) if this returns true.
8457   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8458     SDValue Op = N->getOperand(OpNo);
8459     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8460     if (!SimplifyDemandedBits(Op, Mask, DCI))
8461       return false;
8462 
8463     if (N->getOpcode() != ISD::DELETED_NODE)
8464       DCI.AddToWorklist(N);
8465     return true;
8466   };
8467 
8468   switch (N->getOpcode()) {
8469   default:
8470     break;
8471   case RISCVISD::SplitF64: {
8472     SDValue Op0 = N->getOperand(0);
8473     // If the input to SplitF64 is just BuildPairF64 then the operation is
8474     // redundant. Instead, use BuildPairF64's operands directly.
8475     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8476       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8477 
8478     if (Op0->isUndef()) {
8479       SDValue Lo = DAG.getUNDEF(MVT::i32);
8480       SDValue Hi = DAG.getUNDEF(MVT::i32);
8481       return DCI.CombineTo(N, Lo, Hi);
8482     }
8483 
8484     SDLoc DL(N);
8485 
8486     // It's cheaper to materialise two 32-bit integers than to load a double
8487     // from the constant pool and transfer it to integer registers through the
8488     // stack.
8489     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8490       APInt V = C->getValueAPF().bitcastToAPInt();
8491       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8492       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8493       return DCI.CombineTo(N, Lo, Hi);
8494     }
8495 
8496     // This is a target-specific version of a DAGCombine performed in
8497     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8498     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8499     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8500     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8501         !Op0.getNode()->hasOneUse())
8502       break;
8503     SDValue NewSplitF64 =
8504         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8505                     Op0.getOperand(0));
8506     SDValue Lo = NewSplitF64.getValue(0);
8507     SDValue Hi = NewSplitF64.getValue(1);
8508     APInt SignBit = APInt::getSignMask(32);
8509     if (Op0.getOpcode() == ISD::FNEG) {
8510       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8511                                   DAG.getConstant(SignBit, DL, MVT::i32));
8512       return DCI.CombineTo(N, Lo, NewHi);
8513     }
8514     assert(Op0.getOpcode() == ISD::FABS);
8515     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8516                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8517     return DCI.CombineTo(N, Lo, NewHi);
8518   }
8519   case RISCVISD::SLLW:
8520   case RISCVISD::SRAW:
8521   case RISCVISD::SRLW: {
8522     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8523     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8524         SimplifyDemandedLowBitsHelper(1, 5))
8525       return SDValue(N, 0);
8526 
8527     break;
8528   }
8529   case ISD::ROTR:
8530   case ISD::ROTL:
8531   case RISCVISD::RORW:
8532   case RISCVISD::ROLW: {
8533     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8534       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8535       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8536           SimplifyDemandedLowBitsHelper(1, 5))
8537         return SDValue(N, 0);
8538     }
8539 
8540     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8541   }
8542   case RISCVISD::CLZW:
8543   case RISCVISD::CTZW: {
    // Only the lower 32 bits of the first operand are read.
8545     if (SimplifyDemandedLowBitsHelper(0, 32))
8546       return SDValue(N, 0);
8547     break;
8548   }
8549   case RISCVISD::GREV:
8550   case RISCVISD::GORC: {
    // Only the lower log2(BitWidth) bits of the shift amount are read.
8552     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8553     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8554     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8555       return SDValue(N, 0);
8556 
8557     return combineGREVI_GORCI(N, DAG);
8558   }
8559   case RISCVISD::GREVW:
8560   case RISCVISD::GORCW: {
8561     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8562     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8563         SimplifyDemandedLowBitsHelper(1, 5))
8564       return SDValue(N, 0);
8565 
8566     break;
8567   }
8568   case RISCVISD::SHFL:
8569   case RISCVISD::UNSHFL: {
    // Only the lower log2(BitWidth)-1 bits of the shift amount are read.
8571     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8572     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8573     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8574       return SDValue(N, 0);
8575 
8576     break;
8577   }
8578   case RISCVISD::SHFLW:
8579   case RISCVISD::UNSHFLW: {
8580     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8581     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8582         SimplifyDemandedLowBitsHelper(1, 4))
8583       return SDValue(N, 0);
8584 
8585     break;
8586   }
8587   case RISCVISD::BCOMPRESSW:
8588   case RISCVISD::BDECOMPRESSW: {
8589     // Only the lower 32 bits of LHS and RHS are read.
8590     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8591         SimplifyDemandedLowBitsHelper(1, 32))
8592       return SDValue(N, 0);
8593 
8594     break;
8595   }
8596   case RISCVISD::FSR:
8597   case RISCVISD::FSL:
8598   case RISCVISD::FSRW:
8599   case RISCVISD::FSLW: {
8600     bool IsWInstruction =
8601         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8602     unsigned BitWidth =
8603         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8604     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
    // Only the lower log2(BitWidth)+1 bits of the shift amount are read.
8606     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8607       return SDValue(N, 0);
8608 
8609     break;
8610   }
8611   case RISCVISD::FMV_X_ANYEXTH:
8612   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8613     SDLoc DL(N);
8614     SDValue Op0 = N->getOperand(0);
8615     MVT VT = N->getSimpleValueType(0);
8616     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8617     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8618     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8619     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8620          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8621         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8622          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8623       assert(Op0.getOperand(0).getValueType() == VT &&
8624              "Unexpected value type!");
8625       return Op0.getOperand(0);
8626     }
8627 
8628     // This is a target-specific version of a DAGCombine performed in
8629     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8630     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8631     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8632     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8633         !Op0.getNode()->hasOneUse())
8634       break;
8635     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8636     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8637     APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
8638     if (Op0.getOpcode() == ISD::FNEG)
8639       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8640                          DAG.getConstant(SignBit, DL, VT));
8641 
8642     assert(Op0.getOpcode() == ISD::FABS);
8643     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8644                        DAG.getConstant(~SignBit, DL, VT));
8645   }
8646   case ISD::ADD:
8647     return performADDCombine(N, DAG, Subtarget);
8648   case ISD::SUB:
8649     return performSUBCombine(N, DAG);
8650   case ISD::AND:
8651     return performANDCombine(N, DAG, Subtarget);
8652   case ISD::OR:
8653     return performORCombine(N, DAG, Subtarget);
8654   case ISD::XOR:
8655     return performXORCombine(N, DAG);
8656   case ISD::FADD:
8657   case ISD::UMAX:
8658   case ISD::UMIN:
8659   case ISD::SMAX:
8660   case ISD::SMIN:
8661   case ISD::FMAXNUM:
8662   case ISD::FMINNUM:
8663     return combineBinOpToReduce(N, DAG);
8664   case ISD::SIGN_EXTEND_INREG:
8665     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8666   case ISD::ZERO_EXTEND:
8667     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8668     // type legalization. This is safe because fp_to_uint produces poison if
8669     // it overflows.
8670     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8671       SDValue Src = N->getOperand(0);
8672       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8673           isTypeLegal(Src.getOperand(0).getValueType()))
8674         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8675                            Src.getOperand(0));
8676       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8677           isTypeLegal(Src.getOperand(1).getValueType())) {
8678         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8679         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8680                                   Src.getOperand(0), Src.getOperand(1));
8681         DCI.CombineTo(N, Res);
8682         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8683         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8684         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8685       }
8686     }
8687     return SDValue();
8688   case RISCVISD::SELECT_CC: {
    // Try to fold or simplify this SELECT_CC.
8690     SDValue LHS = N->getOperand(0);
8691     SDValue RHS = N->getOperand(1);
8692     SDValue TrueV = N->getOperand(3);
8693     SDValue FalseV = N->getOperand(4);
8694 
8695     // If the True and False values are the same, we don't need a select_cc.
8696     if (TrueV == FalseV)
8697       return TrueV;
8698 
8699     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8700     if (!ISD::isIntEqualitySetCC(CCVal))
8701       break;
8702 
8703     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8704     //      (select_cc X, Y, lt, trueV, falseV)
8705     // Sometimes the setcc is introduced after select_cc has been formed.
8706     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8707         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8708       // If we're looking for eq 0 instead of ne 0, we need to invert the
8709       // condition.
8710       bool Invert = CCVal == ISD::SETEQ;
8711       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8712       if (Invert)
8713         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8714 
8715       SDLoc DL(N);
8716       RHS = LHS.getOperand(1);
8717       LHS = LHS.getOperand(0);
8718       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8719 
8720       SDValue TargetCC = DAG.getCondCode(CCVal);
8721       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8722                          {LHS, RHS, TargetCC, TrueV, FalseV});
8723     }
8724 
8725     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8726     //      (select_cc X, Y, eq/ne, trueV, falseV)
8727     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8728       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8729                          {LHS.getOperand(0), LHS.getOperand(1),
8730                           N->getOperand(2), TrueV, FalseV});
8731     // (select_cc X, 1, setne, trueV, falseV) ->
8732     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8733     // This can occur when legalizing some floating point comparisons.
8734     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8735     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8736       SDLoc DL(N);
8737       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8738       SDValue TargetCC = DAG.getCondCode(CCVal);
8739       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8740       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8741                          {LHS, RHS, TargetCC, TrueV, FalseV});
8742     }
8743 
8744     break;
8745   }
8746   case RISCVISD::BR_CC: {
8747     SDValue LHS = N->getOperand(1);
8748     SDValue RHS = N->getOperand(2);
8749     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8750     if (!ISD::isIntEqualitySetCC(CCVal))
8751       break;
8752 
8753     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8754     //      (br_cc X, Y, lt, dest)
8755     // Sometimes the setcc is introduced after br_cc has been formed.
8756     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8757         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8758       // If we're looking for eq 0 instead of ne 0, we need to invert the
8759       // condition.
8760       bool Invert = CCVal == ISD::SETEQ;
8761       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8762       if (Invert)
8763         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8764 
8765       SDLoc DL(N);
8766       RHS = LHS.getOperand(1);
8767       LHS = LHS.getOperand(0);
8768       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8769 
8770       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8771                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8772                          N->getOperand(4));
8773     }
8774 
8775     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
8777     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8778       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8779                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8780                          N->getOperand(3), N->getOperand(4));
8781 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
8784     // This can occur when legalizing some floating point comparisons.
8785     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8786     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8787       SDLoc DL(N);
8788       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8789       SDValue TargetCC = DAG.getCondCode(CCVal);
8790       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8791       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8792                          N->getOperand(0), LHS, RHS, TargetCC,
8793                          N->getOperand(4));
8794     }
8795     break;
8796   }
8797   case ISD::BITREVERSE:
8798     return performBITREVERSECombine(N, DAG, Subtarget);
8799   case ISD::FP_TO_SINT:
8800   case ISD::FP_TO_UINT:
8801     return performFP_TO_INTCombine(N, DCI, Subtarget);
8802   case ISD::FP_TO_SINT_SAT:
8803   case ISD::FP_TO_UINT_SAT:
8804     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8805   case ISD::FCOPYSIGN: {
8806     EVT VT = N->getValueType(0);
8807     if (!VT.isVector())
8808       break;
8809     // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
8813     SDValue In2 = N->getOperand(1);
8814     // Avoid cases where the extend/round has multiple uses, as duplicating
8815     // those is typically more expensive than removing a fneg.
8816     if (!In2.hasOneUse())
8817       break;
8818     if (In2.getOpcode() != ISD::FP_EXTEND &&
8819         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8820       break;
8821     In2 = In2.getOperand(0);
8822     if (In2.getOpcode() != ISD::FNEG)
8823       break;
8824     SDLoc DL(N);
8825     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8826     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8827                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8828   }
8829   case ISD::MGATHER:
8830   case ISD::MSCATTER:
8831   case ISD::VP_GATHER:
8832   case ISD::VP_SCATTER: {
8833     if (!DCI.isBeforeLegalize())
8834       break;
8835     SDValue Index, ScaleOp;
8836     bool IsIndexScaled = false;
8837     bool IsIndexSigned = false;
8838     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8839       Index = VPGSN->getIndex();
8840       ScaleOp = VPGSN->getScale();
8841       IsIndexScaled = VPGSN->isIndexScaled();
8842       IsIndexSigned = VPGSN->isIndexSigned();
8843     } else {
8844       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8845       Index = MGSN->getIndex();
8846       ScaleOp = MGSN->getScale();
8847       IsIndexScaled = MGSN->isIndexScaled();
8848       IsIndexSigned = MGSN->isIndexSigned();
8849     }
8850     EVT IndexVT = Index.getValueType();
8851     MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
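    // For example (illustrative), a gather with sign-extended i16 indices and
    // a scale of 4 on RV64 is rewritten below to first sign-extend the
    // indices to i64 and then shift them left by Log2_32(4) == 2, leaving
    // unsigned unscaled addressing.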
8854     bool NeedsIdxLegalization =
8855         IsIndexScaled ||
8856         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8857     if (!NeedsIdxLegalization)
8858       break;
8859 
8860     SDLoc DL(N);
8861 
8862     // Any index legalization should first promote to XLenVT, so we don't lose
8863     // bits when scaling. This may create an illegal index type so we let
8864     // LLVM's legalization take care of the splitting.
8865     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8866     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8867       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8868       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8869                           DL, IndexVT, Index);
8870     }
8871 
8872     if (IsIndexScaled) {
8873       // Manually scale the indices.
8874       // TODO: Sanitize the scale operand here?
8875       // TODO: For VP nodes, should we use VP_SHL here?
8876       unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
      assert(isPowerOf2_32(Scale) && "Expecting power-of-two scale");
8878       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8879       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8880       ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
8881     }
8882 
8883     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
8884     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8885       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8886                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8887                               ScaleOp, VPGN->getMask(),
8888                               VPGN->getVectorLength()},
8889                              VPGN->getMemOperand(), NewIndexTy);
8890     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8891       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8892                               {VPSN->getChain(), VPSN->getValue(),
8893                                VPSN->getBasePtr(), Index, ScaleOp,
8894                                VPSN->getMask(), VPSN->getVectorLength()},
8895                               VPSN->getMemOperand(), NewIndexTy);
8896     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8897       return DAG.getMaskedGather(
8898           N->getVTList(), MGN->getMemoryVT(), DL,
8899           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8900            MGN->getBasePtr(), Index, ScaleOp},
8901           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8902     const auto *MSN = cast<MaskedScatterSDNode>(N);
8903     return DAG.getMaskedScatter(
8904         N->getVTList(), MSN->getMemoryVT(), DL,
8905         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8906          Index, ScaleOp},
8907         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8908   }
8909   case RISCVISD::SRA_VL:
8910   case RISCVISD::SRL_VL:
8911   case RISCVISD::SHL_VL: {
8912     SDValue ShAmt = N->getOperand(1);
8913     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8914       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8915       SDLoc DL(N);
8916       SDValue VL = N->getOperand(3);
8917       EVT VT = N->getValueType(0);
8918       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8919                           ShAmt.getOperand(1), VL);
8920       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8921                          N->getOperand(2), N->getOperand(3));
8922     }
8923     break;
8924   }
8925   case ISD::SRA:
8926   case ISD::SRL:
8927   case ISD::SHL: {
8928     SDValue ShAmt = N->getOperand(1);
8929     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8930       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8931       SDLoc DL(N);
8932       EVT VT = N->getValueType(0);
8933       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8934                           ShAmt.getOperand(1),
8935                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8936       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8937     }
8938     break;
8939   }
8940   case RISCVISD::ADD_VL:
8941     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8942       return V;
8943     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8944   case RISCVISD::SUB_VL:
8945     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8946   case RISCVISD::VWADD_W_VL:
8947   case RISCVISD::VWADDU_W_VL:
8948   case RISCVISD::VWSUB_W_VL:
8949   case RISCVISD::VWSUBU_W_VL:
8950     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8951   case RISCVISD::MUL_VL:
8952     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8953       return V;
8954     // Mul is commutative.
8955     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8956   case ISD::STORE: {
8957     auto *Store = cast<StoreSDNode>(N);
8958     SDValue Val = Store->getValue();
8959     // Combine store of vmv.x.s to vse with VL of 1.
8960     // FIXME: Support FP.
8961     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8962       SDValue Src = Val.getOperand(0);
8963       EVT VecVT = Src.getValueType();
8964       EVT MemVT = Store->getMemoryVT();
8965       // The memory VT and the element type must match.
8966       if (VecVT.getVectorElementType() == MemVT) {
8967         SDLoc DL(N);
8968         MVT MaskVT = getMaskTypeFor(VecVT);
8969         return DAG.getStoreVP(
8970             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8971             DAG.getConstant(1, DL, MaskVT),
8972             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8973             Store->getMemOperand(), Store->getAddressingMode(),
8974             Store->isTruncatingStore(), /*IsCompress*/ false);
8975       }
8976     }
8977 
8978     break;
8979   }
8980   case ISD::SPLAT_VECTOR: {
8981     EVT VT = N->getValueType(0);
8982     // Only perform this combine on legal MVT types.
8983     if (!isTypeLegal(VT))
8984       break;
8985     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8986                                          DAG, Subtarget))
8987       return Gather;
8988     break;
8989   }
8990   case RISCVISD::VMV_V_X_VL: {
8991     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8992     // scalar input.
8993     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8994     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8995     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8996       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8997         return SDValue(N, 0);
8998 
8999     break;
9000   }
9001   case ISD::INTRINSIC_WO_CHAIN: {
9002     unsigned IntNo = N->getConstantOperandVal(0);
9003     switch (IntNo) {
9004       // By default we do not combine any intrinsic.
9005     default:
9006       return SDValue();
9007     case Intrinsic::riscv_vcpop:
9008     case Intrinsic::riscv_vcpop_mask:
9009     case Intrinsic::riscv_vfirst:
9010     case Intrinsic::riscv_vfirst_mask: {
9011       SDValue VL = N->getOperand(2);
9012       if (IntNo == Intrinsic::riscv_vcpop_mask ||
9013           IntNo == Intrinsic::riscv_vfirst_mask)
9014         VL = N->getOperand(3);
9015       if (!isNullConstant(VL))
9016         return SDValue();
9017       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
9018       SDLoc DL(N);
9019       EVT VT = N->getValueType(0);
9020       if (IntNo == Intrinsic::riscv_vfirst ||
9021           IntNo == Intrinsic::riscv_vfirst_mask)
9022         return DAG.getConstant(-1, DL, VT);
9023       return DAG.getConstant(0, DL, VT);
9024     }
9025     }
9026   }
9027   }
9028 
9029   return SDValue();
9030 }
9031 
9032 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
9033     const SDNode *N, CombineLevel Level) const {
9034   // The following folds are only desirable if `(OP _, c1 << c2)` can be
9035   // materialised in fewer instructions than `(OP _, c1)`:
9036   //
9037   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
9038   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
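  //
  // For example (illustrative), (shl (add x, 2047), 2) is left alone: 2047
  // fits in a single ADDI immediate, while 2047 << 2 == 8188 would need an
  // LUI/ADDI pair.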
9039   SDValue N0 = N->getOperand(0);
9040   EVT Ty = N0.getValueType();
9041   if (Ty.isScalarInteger() &&
9042       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
9043     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
9044     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
9045     if (C1 && C2) {
9046       const APInt &C1Int = C1->getAPIntValue();
9047       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
9048 
9049       // We can materialise `c1 << c2` into an add immediate, so it's "free",
9050       // and the combine should happen, to potentially allow further combines
9051       // later.
9052       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
9053           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
9054         return true;
9055 
9056       // We can materialise `c1` in an add immediate, so it's "free", and the
9057       // combine should be prevented.
9058       if (C1Int.getMinSignedBits() <= 64 &&
9059           isLegalAddImmediate(C1Int.getSExtValue()))
9060         return false;
9061 
9062       // Neither constant will fit into an immediate, so find materialisation
9063       // costs.
9064       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9065                                               Subtarget.getFeatureBits(),
9066                                               /*CompressionCost*/true);
9067       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9068           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9069           /*CompressionCost*/true);
9070 
9071       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9072       // combine should be prevented.
9073       if (C1Cost < ShiftedC1Cost)
9074         return false;
9075     }
9076   }
9077   return true;
9078 }
9079 
9080 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9081     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9082     TargetLoweringOpt &TLO) const {
9083   // Delay this optimization as late as possible.
9084   if (!TLO.LegalOps)
9085     return false;
9086 
9087   EVT VT = Op.getValueType();
9088   if (VT.isVector())
9089     return false;
9090 
9091   // Only handle AND for now.
9092   if (Op.getOpcode() != ISD::AND)
9093     return false;
9094 
9095   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9096   if (!C)
9097     return false;
9098 
9099   const APInt &Mask = C->getAPIntValue();
9100 
9101   // Clear all non-demanded bits initially.
9102   APInt ShrunkMask = Mask & DemandedBits;
9103 
  // Try to make a smaller immediate by setting undemanded bits.
  APInt ExpandedMask = Mask | ~DemandedBits;
9107 
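  // A candidate mask is legal if it covers every bit of ShrunkMask (the
  // demanded set bits) and sets nothing outside ExpandedMask (the original
  // mask plus all undemanded bits).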
9108   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9109     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9110   };
9111   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
9112     if (NewMask == Mask)
9113       return true;
9114     SDLoc DL(Op);
9115     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
9116     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
9117     return TLO.CombineTo(Op, NewOp);
9118   };
9119 
  // If the shrunk mask fits in sign-extended 12 bits, let the
  // target-independent code apply it.
9122   if (ShrunkMask.isSignedIntN(12))
9123     return false;
9124 
9125   // Preserve (and X, 0xffff) when zext.h is supported.
9126   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9127     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9128     if (IsLegalMask(NewMask))
9129       return UseMask(NewMask);
9130   }
9131 
9132   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9133   if (VT == MVT::i64) {
9134     APInt NewMask = APInt(64, 0xffffffff);
9135     if (IsLegalMask(NewMask))
9136       return UseMask(NewMask);
9137   }
9138 
9139   // For the remaining optimizations, we need to be able to make a negative
9140   // number through a combination of mask and undemanded bits.
9141   if (!ExpandedMask.isNegative())
9142     return false;
9143 
  // Compute the minimum number of bits needed to represent the negative
  // number.
9145   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9146 
  // Try to make a 12-bit negative immediate. If that fails, try to make a
  // 32-bit negative immediate unless the shrunk immediate already fits in 32
  // bits.
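  // Illustrative example (values chosen for exposition): on RV32 without
  // Zbb/Zbp, ShrunkMask == 0x0FF0 with every other bit undemanded gives an
  // all-ones ExpandedMask, so MinSignedBits == 1; setting bits from 11 yields
  // 0xFFFFFFF0 == -16, which fits in a 12-bit ANDI immediate.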
9149   APInt NewMask = ShrunkMask;
9150   if (MinSignedBits <= 12)
9151     NewMask.setBitsFrom(11);
9152   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
9153     NewMask.setBitsFrom(31);
9154   else
9155     return false;
9156 
  // Sanity check that the new mask covers ShrunkMask and stays within
  // ExpandedMask.
9158   assert(IsLegalMask(NewMask));
9159   return UseMask(NewMask);
9160 }
9161 
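// Illustrative behaviour (derived from the loop below): GREV with ShAmt == 7
// reverses the bits within each byte, so computeGREVOrGORC(0x01, 7, false)
// returns 0x80, while GORC ORs the original bits back in at each stage, so
// computeGREVOrGORC(0x01, 7, true) returns 0xFF.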
9162 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9163   static const uint64_t GREVMasks[] = {
9164       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9165       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9166 
9167   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9168     unsigned Shift = 1 << Stage;
9169     if (ShAmt & Shift) {
9170       uint64_t Mask = GREVMasks[Stage];
9171       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9172       if (IsGORC)
9173         Res |= x;
9174       x = Res;
9175     }
9176   }
9177 
9178   return x;
9179 }
9180 
9181 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9182                                                         KnownBits &Known,
9183                                                         const APInt &DemandedElts,
9184                                                         const SelectionDAG &DAG,
9185                                                         unsigned Depth) const {
9186   unsigned BitWidth = Known.getBitWidth();
9187   unsigned Opc = Op.getOpcode();
9188   assert((Opc >= ISD::BUILTIN_OP_END ||
9189           Opc == ISD::INTRINSIC_WO_CHAIN ||
9190           Opc == ISD::INTRINSIC_W_CHAIN ||
9191           Opc == ISD::INTRINSIC_VOID) &&
9192          "Should use MaskedValueIsZero if you don't know whether Op"
9193          " is a target node!");
9194 
9195   Known.resetAll();
9196   switch (Opc) {
9197   default: break;
9198   case RISCVISD::SELECT_CC: {
9199     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9200     // If we don't know any bits, early out.
9201     if (Known.isUnknown())
9202       break;
9203     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9204 
9205     // Only known if known in both the LHS and RHS.
9206     Known = KnownBits::commonBits(Known, Known2);
9207     break;
9208   }
9209   case RISCVISD::REMUW: {
9210     KnownBits Known2;
9211     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9212     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9213     // We only care about the lower 32 bits.
9214     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9215     // Restore the original width by sign extending.
9216     Known = Known.sext(BitWidth);
9217     break;
9218   }
9219   case RISCVISD::DIVUW: {
9220     KnownBits Known2;
9221     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9222     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9223     // We only care about the lower 32 bits.
9224     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9225     // Restore the original width by sign extending.
9226     Known = Known.sext(BitWidth);
9227     break;
9228   }
9229   case RISCVISD::CTZW: {
9230     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9231     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9232     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9233     Known.Zero.setBitsFrom(LowBits);
9234     break;
9235   }
9236   case RISCVISD::CLZW: {
9237     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9238     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9239     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9240     Known.Zero.setBitsFrom(LowBits);
9241     break;
9242   }
9243   case RISCVISD::GREV:
9244   case RISCVISD::GORC: {
9245     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9246       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9247       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9248       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
      // To compute the known zero bits, invert the value, transform it, and
      // invert the result back.
9250       Known.Zero =
9251           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9252       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9253     }
9254     break;
9255   }
9256   case RISCVISD::READ_VLENB: {
9257     // If we know the minimum VLen from Zvl extensions, we can use that to
9258     // determine the trailing zeros of VLENB.
9259     // FIXME: Limit to 128 bit vectors until we have more testing.
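    // For example (illustrative), with Zvl128b the minimum VLEN is 128 bits,
    // so MinVLenB == 16 and the low 4 bits of VLENB are known to be zero.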
9260     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
9261     if (MinVLenB > 0)
9262       Known.Zero.setLowBits(Log2_32(MinVLenB));
    // We assume VLEN is at most 65536 bits, so VLENB is at most 65536 / 8
    // bytes.
9264     Known.Zero.setBitsFrom(14);
9265     break;
9266   }
9267   case ISD::INTRINSIC_W_CHAIN:
9268   case ISD::INTRINSIC_WO_CHAIN: {
9269     unsigned IntNo =
9270         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9271     switch (IntNo) {
9272     default:
9273       // We can't do anything for most intrinsics.
9274       break;
9275     case Intrinsic::riscv_vsetvli:
9276     case Intrinsic::riscv_vsetvlimax:
9277     case Intrinsic::riscv_vsetvli_opt:
9278     case Intrinsic::riscv_vsetvlimax_opt:
9279       // Assume that VL output is positive and would fit in an int32_t.
9280       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9281       if (BitWidth >= 32)
9282         Known.Zero.setBitsFrom(31);
9283       break;
9284     }
9285     break;
9286   }
9287   }
9288 }
9289 
9290 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9291     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9292     unsigned Depth) const {
9293   switch (Op.getOpcode()) {
9294   default:
9295     break;
9296   case RISCVISD::SELECT_CC: {
9297     unsigned Tmp =
9298         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9299     if (Tmp == 1) return 1;  // Early out.
9300     unsigned Tmp2 =
9301         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9302     return std::min(Tmp, Tmp2);
9303   }
9304   case RISCVISD::SLLW:
9305   case RISCVISD::SRAW:
9306   case RISCVISD::SRLW:
9307   case RISCVISD::DIVW:
9308   case RISCVISD::DIVUW:
9309   case RISCVISD::REMUW:
9310   case RISCVISD::ROLW:
9311   case RISCVISD::RORW:
9312   case RISCVISD::GREVW:
9313   case RISCVISD::GORCW:
9314   case RISCVISD::FSLW:
9315   case RISCVISD::FSRW:
9316   case RISCVISD::SHFLW:
9317   case RISCVISD::UNSHFLW:
9318   case RISCVISD::BCOMPRESSW:
9319   case RISCVISD::BDECOMPRESSW:
9320   case RISCVISD::BFPW:
9321   case RISCVISD::FCVT_W_RV64:
9322   case RISCVISD::FCVT_WU_RV64:
9323   case RISCVISD::STRICT_FCVT_W_RV64:
9324   case RISCVISD::STRICT_FCVT_WU_RV64:
9325     // TODO: As the result is sign-extended, this is conservatively correct. A
9326     // more precise answer could be calculated for SRAW depending on known
9327     // bits in the shift amount.
9328     return 33;
9329   case RISCVISD::SHFL:
9330   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
9332     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9333     // will stay within the upper 32 bits. If there were more than 32 sign bits
9334     // before there will be at least 33 sign bits after.
9335     if (Op.getValueType() == MVT::i64 &&
9336         isa<ConstantSDNode>(Op.getOperand(1)) &&
9337         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9338       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9339       if (Tmp > 32)
9340         return 33;
9341     }
9342     break;
9343   }
9344   case RISCVISD::VMV_X_S: {
9345     // The number of sign bits of the scalar result is computed by obtaining the
9346     // element type of the input vector operand, subtracting its width from the
9347     // XLEN, and then adding one (sign bit within the element type). If the
9348     // element type is wider than XLen, the least-significant XLEN bits are
9349     // taken.
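    // For example (illustrative), reading the first element of an nxv4i8
    // vector on RV64 yields 64 - 8 + 1 == 57 known sign bits.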
9350     unsigned XLen = Subtarget.getXLen();
9351     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9352     if (EltBits <= XLen)
9353       return XLen - EltBits + 1;
9354     break;
9355   }
9356   }
9357 
9358   return 1;
9359 }
9360 
9361 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9362                                                   MachineBasicBlock *BB) {
9363   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9364 
9365   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9366   // Should the count have wrapped while it was being read, we need to try
9367   // again.
9368   // ...
9369   // read:
9370   // rdcycleh x3 # load high word of cycle
9371   // rdcycle  x2 # load low word of cycle
9372   // rdcycleh x4 # load high word of cycle
9373   // bne x3, x4, read # check if high word reads match, otherwise try again
9374   // ...
9375 
9376   MachineFunction &MF = *BB->getParent();
9377   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9378   MachineFunction::iterator It = ++BB->getIterator();
9379 
9380   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9381   MF.insert(It, LoopMBB);
9382 
9383   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9384   MF.insert(It, DoneMBB);
9385 
9386   // Transfer the remainder of BB and its successor edges to DoneMBB.
9387   DoneMBB->splice(DoneMBB->begin(), BB,
9388                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9389   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9390 
9391   BB->addSuccessor(LoopMBB);
9392 
9393   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9394   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9395   Register LoReg = MI.getOperand(0).getReg();
9396   Register HiReg = MI.getOperand(1).getReg();
9397   DebugLoc DL = MI.getDebugLoc();
9398 
9399   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9400   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9401       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9402       .addReg(RISCV::X0);
9403   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9404       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9405       .addReg(RISCV::X0);
9406   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9407       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9408       .addReg(RISCV::X0);
9409 
9410   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9411       .addReg(HiReg)
9412       .addReg(ReadAgainReg)
9413       .addMBB(LoopMBB);
9414 
9415   LoopMBB->addSuccessor(LoopMBB);
9416   LoopMBB->addSuccessor(DoneMBB);
9417 
9418   MI.eraseFromParent();
9419 
9420   return DoneMBB;
9421 }
9422 
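// Lower SplitF64Pseudo by spilling the FPR64 source to a stack slot and
// reloading its two 32-bit halves into GPRs with a pair of LW instructions.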
9423 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9424                                              MachineBasicBlock *BB) {
9425   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9426 
9427   MachineFunction &MF = *BB->getParent();
9428   DebugLoc DL = MI.getDebugLoc();
9429   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9430   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9431   Register LoReg = MI.getOperand(0).getReg();
9432   Register HiReg = MI.getOperand(1).getReg();
9433   Register SrcReg = MI.getOperand(2).getReg();
9434   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9435   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9436 
9437   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9438                           RI);
9439   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9440   MachineMemOperand *MMOLo =
9441       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9442   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9443       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9444   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9445       .addFrameIndex(FI)
9446       .addImm(0)
9447       .addMemOperand(MMOLo);
9448   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9449       .addFrameIndex(FI)
9450       .addImm(4)
9451       .addMemOperand(MMOHi);
9452   MI.eraseFromParent(); // The pseudo instruction is gone now.
9453   return BB;
9454 }
9455 
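// Lower BuildPairF64Pseudo by storing the two GPR halves to a stack slot with
// a pair of SW instructions and reloading the combined value into an FPR64.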
9456 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9457                                                  MachineBasicBlock *BB) {
9458   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9459          "Unexpected instruction");
9460 
9461   MachineFunction &MF = *BB->getParent();
9462   DebugLoc DL = MI.getDebugLoc();
9463   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9464   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9465   Register DstReg = MI.getOperand(0).getReg();
9466   Register LoReg = MI.getOperand(1).getReg();
9467   Register HiReg = MI.getOperand(2).getReg();
9468   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9469   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9470 
9471   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9472   MachineMemOperand *MMOLo =
9473       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9474   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9475       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9476   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9477       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9478       .addFrameIndex(FI)
9479       .addImm(0)
9480       .addMemOperand(MMOLo);
9481   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9482       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9483       .addFrameIndex(FI)
9484       .addImm(4)
9485       .addMemOperand(MMOHi);
9486   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9487   MI.eraseFromParent(); // The pseudo instruction is gone now.
9488   return BB;
9489 }
9490 
9491 static bool isSelectPseudo(MachineInstr &MI) {
9492   switch (MI.getOpcode()) {
9493   default:
9494     return false;
9495   case RISCV::Select_GPR_Using_CC_GPR:
9496   case RISCV::Select_FPR16_Using_CC_GPR:
9497   case RISCV::Select_FPR32_Using_CC_GPR:
9498   case RISCV::Select_FPR64_Using_CC_GPR:
9499     return true;
9500   }
9501 }
9502 
9503 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9504                                         unsigned RelOpcode, unsigned EqOpcode,
9505                                         const RISCVSubtarget &Subtarget) {
9506   DebugLoc DL = MI.getDebugLoc();
9507   Register DstReg = MI.getOperand(0).getReg();
9508   Register Src1Reg = MI.getOperand(1).getReg();
9509   Register Src2Reg = MI.getOperand(2).getReg();
9510   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9511   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9512   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9513 
9514   // Save the current FFLAGS.
9515   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9516 
9517   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9518                  .addReg(Src1Reg)
9519                  .addReg(Src2Reg);
9520   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9521     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9522 
9523   // Restore the FFLAGS.
9524   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9525       .addReg(SavedFFlags, RegState::Kill);
9526 
  // Issue a dummy FEQ opcode to raise an exception for signaling NaNs.
9528   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9529                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9530                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9531   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9532     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9533 
9534   // Erase the pseudoinstruction.
9535   MI.eraseFromParent();
9536   return BB;
9537 }
9538 
9539 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9540                                            MachineBasicBlock *BB,
9541                                            const RISCVSubtarget &Subtarget) {
9542   // To "insert" Select_* instructions, we actually have to insert the triangle
9543   // control-flow pattern.  The incoming instructions know the destination vreg
  // to set, the registers to compare, the true/false values to select
  // between, and the condcode to use to select the appropriate branch.
9546   //
9547   // We produce the following control flow:
9548   //     HeadMBB
9549   //     |  \
9550   //     |  IfFalseMBB
9551   //     | /
9552   //    TailMBB
9553   //
9554   // When we find a sequence of selects we attempt to optimize their emission
9555   // by sharing the control flow. Currently we only handle cases where we have
9556   // multiple selects with the exact same condition (same LHS, RHS and CC).
9557   // The selects may be interleaved with other instructions if the other
9558   // instructions meet some requirements we deem safe:
9559   // - They are debug instructions. Otherwise,
9560   // - They do not have side-effects, do not access memory and their inputs do
9561   //   not depend on the results of the select pseudo-instructions.
9562   // The TrueV/FalseV operands of the selects cannot depend on the result of
9563   // previous selects in the sequence.
9564   // These conditions could be further relaxed. See the X86 target for a
9565   // related approach and more information.
9566   Register LHS = MI.getOperand(1).getReg();
9567   Register RHS = MI.getOperand(2).getReg();
9568   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9569 
9570   SmallVector<MachineInstr *, 4> SelectDebugValues;
9571   SmallSet<Register, 4> SelectDests;
9572   SelectDests.insert(MI.getOperand(0).getReg());
9573 
9574   MachineInstr *LastSelectPseudo = &MI;
9575 
9576   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9577        SequenceMBBI != E; ++SequenceMBBI) {
9578     if (SequenceMBBI->isDebugInstr())
9579       continue;
9580     if (isSelectPseudo(*SequenceMBBI)) {
9581       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9582           SequenceMBBI->getOperand(2).getReg() != RHS ||
9583           SequenceMBBI->getOperand(3).getImm() != CC ||
9584           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9585           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9586         break;
9587       LastSelectPseudo = &*SequenceMBBI;
9588       SequenceMBBI->collectDebugValues(SelectDebugValues);
9589       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9590     } else {
9591       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9592           SequenceMBBI->mayLoadOrStore())
9593         break;
9594       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9595             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9596           }))
9597         break;
9598     }
9599   }
9600 
9601   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9602   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9603   DebugLoc DL = MI.getDebugLoc();
9604   MachineFunction::iterator I = ++BB->getIterator();
9605 
9606   MachineBasicBlock *HeadMBB = BB;
9607   MachineFunction *F = BB->getParent();
9608   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9609   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9610 
9611   F->insert(I, IfFalseMBB);
9612   F->insert(I, TailMBB);
9613 
9614   // Transfer debug instructions associated with the selects to TailMBB.
9615   for (MachineInstr *DebugInstr : SelectDebugValues) {
9616     TailMBB->push_back(DebugInstr->removeFromParent());
9617   }
9618 
9619   // Move all instructions after the sequence to TailMBB.
9620   TailMBB->splice(TailMBB->end(), HeadMBB,
9621                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9622   // Update machine-CFG edges by transferring all successors of the current
9623   // block to the new block which will contain the Phi nodes for the selects.
9624   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9625   // Set the successors for HeadMBB.
9626   HeadMBB->addSuccessor(IfFalseMBB);
9627   HeadMBB->addSuccessor(TailMBB);
9628 
9629   // Insert appropriate branch.
9630   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9631     .addReg(LHS)
9632     .addReg(RHS)
9633     .addMBB(TailMBB);
9634 
9635   // IfFalseMBB just falls through to TailMBB.
9636   IfFalseMBB->addSuccessor(TailMBB);
9637 
9638   // Create PHIs for all of the select pseudo-instructions.
9639   auto SelectMBBI = MI.getIterator();
9640   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9641   auto InsertionPoint = TailMBB->begin();
9642   while (SelectMBBI != SelectEnd) {
9643     auto Next = std::next(SelectMBBI);
9644     if (isSelectPseudo(*SelectMBBI)) {
9645       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9646       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9647               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9648           .addReg(SelectMBBI->getOperand(4).getReg())
9649           .addMBB(HeadMBB)
9650           .addReg(SelectMBBI->getOperand(5).getReg())
9651           .addMBB(IfFalseMBB);
9652       SelectMBBI->eraseFromParent();
9653     }
9654     SelectMBBI = Next;
9655   }
9656 
9657   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9658   return TailMBB;
9659 }
9660 
9661 MachineBasicBlock *
9662 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9663                                                  MachineBasicBlock *BB) const {
9664   switch (MI.getOpcode()) {
9665   default:
9666     llvm_unreachable("Unexpected instr type to insert");
9667   case RISCV::ReadCycleWide:
9668     assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
9670     return emitReadCycleWidePseudo(MI, BB);
9671   case RISCV::Select_GPR_Using_CC_GPR:
9672   case RISCV::Select_FPR16_Using_CC_GPR:
9673   case RISCV::Select_FPR32_Using_CC_GPR:
9674   case RISCV::Select_FPR64_Using_CC_GPR:
9675     return emitSelectPseudo(MI, BB, Subtarget);
9676   case RISCV::BuildPairF64Pseudo:
9677     return emitBuildPairF64Pseudo(MI, BB);
9678   case RISCV::SplitF64Pseudo:
9679     return emitSplitF64Pseudo(MI, BB);
9680   case RISCV::PseudoQuietFLE_H:
9681     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9682   case RISCV::PseudoQuietFLT_H:
9683     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9684   case RISCV::PseudoQuietFLE_S:
9685     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9686   case RISCV::PseudoQuietFLT_S:
9687     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9688   case RISCV::PseudoQuietFLE_D:
9689     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9690   case RISCV::PseudoQuietFLT_D:
9691     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9692   }
9693 }
9694 
9695 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9696                                                         SDNode *Node) const {
9697   // Add FRM dependency to any instructions with dynamic rounding mode.
9698   unsigned Opc = MI.getOpcode();
9699   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9700   if (Idx < 0)
9701     return;
9702   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9703     return;
9704   // If the instruction already reads FRM, don't add another read.
9705   if (MI.readsRegister(RISCV::FRM))
9706     return;
9707   MI.addOperand(
9708       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9709 }
9710 
9711 // Calling Convention Implementation.
9712 // The expectations for frontend ABI lowering vary from target to target.
9713 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9714 // details, but this is a longer term goal. For now, we simply try to keep the
9715 // role of the frontend as simple and well-defined as possible. The rules can
9716 // be summarised as:
9717 // * Never split up large scalar arguments. We handle them here.
9718 // * If a hardfloat calling convention is being used, and the struct may be
9719 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9720 // available, then pass as two separate arguments. If either the GPRs or FPRs
9721 // are exhausted, then pass according to the rule below.
9722 // * If a struct could never be passed in registers or directly in a stack
9723 // slot (as it is larger than 2*XLEN and the floating point rules don't
9724 // apply), then pass it using a pointer with the byval attribute.
9725 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9726 // word-sized array or a 2*XLEN scalar (depending on alignment).
9727 // * The frontend can determine whether a struct is returned by reference or
9728 // not based on its size and fields. If it will be returned by reference, the
9729 // frontend must modify the prototype so a pointer with the sret annotation is
9730 // passed as the first argument. This is not necessary for large scalar
9731 // returns.
9732 // * Struct return values and varargs should be coerced to structs containing
9733 // register-size fields in the same situations they would be for fixed
9734 // arguments.
9735 
9736 static const MCPhysReg ArgGPRs[] = {
9737   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9738   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9739 };
9740 static const MCPhysReg ArgFPR16s[] = {
9741   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9742   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9743 };
9744 static const MCPhysReg ArgFPR32s[] = {
9745   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9746   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9747 };
9748 static const MCPhysReg ArgFPR64s[] = {
9749   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9750   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9751 };
9752 // This is an interim calling convention and it may be changed in the future.
9753 static const MCPhysReg ArgVRs[] = {
9754     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9755     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9756     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9757 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9758                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9759                                      RISCV::V20M2, RISCV::V22M2};
9760 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9761                                      RISCV::V20M4};
9762 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9763 
9764 // Pass a 2*XLEN argument that has been split into two XLEN values through
9765 // registers or the stack as necessary.
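// For example (illustrative), an i64 argument on RV32 arrives here as two i32
// halves: if only one argument GPR remains free, the first half is passed in
// that register and the second half in a stack slot.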
9766 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9767                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9768                                 MVT ValVT2, MVT LocVT2,
9769                                 ISD::ArgFlagsTy ArgFlags2) {
9770   unsigned XLenInBytes = XLen / 8;
9771   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9772     // At least one half can be passed via register.
9773     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9774                                      VA1.getLocVT(), CCValAssign::Full));
9775   } else {
9776     // Both halves must be passed on the stack, with proper alignment.
9777     Align StackAlign =
9778         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9779     State.addLoc(
9780         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9781                             State.AllocateStack(XLenInBytes, StackAlign),
9782                             VA1.getLocVT(), CCValAssign::Full));
9783     State.addLoc(CCValAssign::getMem(
9784         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9785         LocVT2, CCValAssign::Full));
9786     return false;
9787   }
9788 
9789   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9790     // The second half can also be passed via register.
9791     State.addLoc(
9792         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9793   } else {
9794     // The second half is passed via the stack, without additional alignment.
9795     State.addLoc(CCValAssign::getMem(
9796         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9797         LocVT2, CCValAssign::Full));
9798   }
9799 
9800   return false;
9801 }
9802 
9803 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9804                                Optional<unsigned> FirstMaskArgument,
9805                                CCState &State, const RISCVTargetLowering &TLI) {
9806   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9807   if (RC == &RISCV::VRRegClass) {
9808     // Assign the first mask argument to V0.
9809     // This is an interim calling convention and it may be changed in the
9810     // future.
9811     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9812       return State.AllocateReg(RISCV::V0);
9813     return State.AllocateReg(ArgVRs);
9814   }
9815   if (RC == &RISCV::VRM2RegClass)
9816     return State.AllocateReg(ArgVRM2s);
9817   if (RC == &RISCV::VRM4RegClass)
9818     return State.AllocateReg(ArgVRM4s);
9819   if (RC == &RISCV::VRM8RegClass)
9820     return State.AllocateReg(ArgVRM8s);
9821   llvm_unreachable("Unhandled register class for ValueType");
9822 }
9823 
9824 // Implements the RISC-V calling convention. Returns true upon failure.
9825 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9826                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9827                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9828                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9829                      Optional<unsigned> FirstMaskArgument) {
9830   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9831   assert(XLen == 32 || XLen == 64);
9832   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9833 
  // Any return value split into more than two values can't be returned
9835   // directly. Vectors are returned via the available vector registers.
9836   if (!LocVT.isVector() && IsRet && ValNo > 1)
9837     return true;
9838 
  // Set UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing
  // a variadic argument, or if no F16/F32 argument registers are available.
9841   bool UseGPRForF16_F32 = true;
  // Set UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
9844   bool UseGPRForF64 = true;
9845 
9846   switch (ABI) {
9847   default:
9848     llvm_unreachable("Unexpected ABI");
9849   case RISCVABI::ABI_ILP32:
9850   case RISCVABI::ABI_LP64:
9851     break;
9852   case RISCVABI::ABI_ILP32F:
9853   case RISCVABI::ABI_LP64F:
9854     UseGPRForF16_F32 = !IsFixed;
9855     break;
9856   case RISCVABI::ABI_ILP32D:
9857   case RISCVABI::ABI_LP64D:
9858     UseGPRForF16_F32 = !IsFixed;
9859     UseGPRForF64 = !IsFixed;
9860     break;
9861   }
9862 
9863   // FPR16, FPR32, and FPR64 alias each other.
9864   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9865     UseGPRForF16_F32 = true;
9866     UseGPRForF64 = true;
9867   }
9868 
9869   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9870   // similar local variables rather than directly checking against the target
9871   // ABI.
9872 
9873   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9874     LocVT = XLenVT;
9875     LocInfo = CCValAssign::BCvt;
9876   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9877     LocVT = MVT::i64;
9878     LocInfo = CCValAssign::BCvt;
9879   }
9880 
9881   // If this is a variadic argument, the RISC-V calling convention requires
9882   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9883   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9884   // be used regardless of whether the original argument was split during
9885   // legalisation or not. The argument will not be passed by registers if the
9886   // original type is larger than 2*XLEN, so the register alignment rule does
9887   // not apply.
9888   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9889   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9890       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9891     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9892     // Skip 'odd' register if necessary.
9893     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9894       State.AllocateReg(ArgGPRs);
9895   }
9896 
9897   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9898   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9899       State.getPendingArgFlags();
9900 
9901   assert(PendingLocs.size() == PendingArgFlags.size() &&
9902          "PendingLocs and PendingArgFlags out of sync");
9903 
9904   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9905   // registers are exhausted.
9906   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9907     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9908            "Can't lower f64 if it is split");
    // Depending on the available argument GPRs, f64 may be passed in a pair of
9910     // GPRs, split between a GPR and the stack, or passed completely on the
9911     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9912     // cases.
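    // For example (illustrative), if only a7 remains unallocated, the low
    // half of the f64 is passed in a7 and the high half in a 4-byte stack
    // slot.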
9913     Register Reg = State.AllocateReg(ArgGPRs);
9914     LocVT = MVT::i32;
9915     if (!Reg) {
9916       unsigned StackOffset = State.AllocateStack(8, Align(8));
9917       State.addLoc(
9918           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9919       return false;
9920     }
9921     if (!State.AllocateReg(ArgGPRs))
9922       State.AllocateStack(4, Align(4));
9923     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9924     return false;
9925   }
9926 
9927   // Fixed-length vectors are located in the corresponding scalable-vector
9928   // container types.
9929   if (ValVT.isFixedLengthVector())
9930     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9931 
9932   // Split arguments might be passed indirectly, so keep track of the pending
9933   // values. Split vectors are passed via a mix of registers and indirectly, so
9934   // treat them as we would any other argument.
9935   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9936     LocVT = XLenVT;
9937     LocInfo = CCValAssign::Indirect;
9938     PendingLocs.push_back(
9939         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9940     PendingArgFlags.push_back(ArgFlags);
9941     if (!ArgFlags.isSplitEnd()) {
9942       return false;
9943     }
9944   }
9945 
9946   // If the split argument only had two elements, it should be passed directly
9947   // in registers or on the stack.
9948   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9949       PendingLocs.size() <= 2) {
9950     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9951     // Apply the normal calling convention rules to the first half of the
9952     // split argument.
9953     CCValAssign VA = PendingLocs[0];
9954     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9955     PendingLocs.clear();
9956     PendingArgFlags.clear();
9957     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9958                                ArgFlags);
9959   }
9960 
9961   // Allocate to a register if possible, or else a stack slot.
9962   Register Reg;
9963   unsigned StoreSizeBytes = XLen / 8;
9964   Align StackAlign = Align(XLen / 8);
9965 
9966   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9967     Reg = State.AllocateReg(ArgFPR16s);
9968   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9969     Reg = State.AllocateReg(ArgFPR32s);
9970   else if (ValVT == MVT::f64 && !UseGPRForF64)
9971     Reg = State.AllocateReg(ArgFPR64s);
9972   else if (ValVT.isVector()) {
9973     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9974     if (!Reg) {
9975       // For return values, the vector must be passed fully via registers or
9976       // via the stack.
9977       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9978       // but we're using all of them.
9979       if (IsRet)
9980         return true;
      // Try using a GPR to pass the address.
9982       if ((Reg = State.AllocateReg(ArgGPRs))) {
9983         LocVT = XLenVT;
9984         LocInfo = CCValAssign::Indirect;
9985       } else if (ValVT.isScalableVector()) {
9986         LocVT = XLenVT;
9987         LocInfo = CCValAssign::Indirect;
9988       } else {
9989         // Pass fixed-length vectors on the stack.
9990         LocVT = ValVT;
9991         StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, falling back to an alignment
        // of 1 for vXi1 vectors, whose elements are smaller than a byte.
9994         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9995       }
9996     }
9997   } else {
9998     Reg = State.AllocateReg(ArgGPRs);
9999   }
10000 
10001   unsigned StackOffset =
10002       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
10003 
10004   // If we reach this point and PendingLocs is non-empty, we must be at the
10005   // end of a split argument that must be passed indirectly.
10006   if (!PendingLocs.empty()) {
10007     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
10008     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
10009 
10010     for (auto &It : PendingLocs) {
10011       if (Reg)
10012         It.convertToReg(Reg);
10013       else
10014         It.convertToMem(StackOffset);
10015       State.addLoc(It);
10016     }
10017     PendingLocs.clear();
10018     PendingArgFlags.clear();
10019     return false;
10020   }
10021 
10022   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
10023           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
10024          "Expected an XLenVT or vector types at this stage");
10025 
10026   if (Reg) {
10027     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10028     return false;
10029   }
10030 
10031   // When a floating-point value is passed on the stack, no bit-conversion is
10032   // needed.
10033   if (ValVT.isFloatingPoint()) {
10034     LocVT = ValVT;
10035     LocInfo = CCValAssign::Full;
10036   }
10037   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10038   return false;
10039 }
10040 
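// Return the index of the first argument whose type is a vector of i1, if
// any; allocateRVVReg above pre-assigns that mask argument to V0 (an interim
// calling-convention choice).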
10041 template <typename ArgTy>
10042 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
10043   for (const auto &ArgIdx : enumerate(Args)) {
10044     MVT ArgVT = ArgIdx.value().VT;
10045     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
10046       return ArgIdx.index();
10047   }
10048   return None;
10049 }
10050 
10051 void RISCVTargetLowering::analyzeInputArgs(
10052     MachineFunction &MF, CCState &CCInfo,
10053     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
10054     RISCVCCAssignFn Fn) const {
10055   unsigned NumArgs = Ins.size();
10056   FunctionType *FType = MF.getFunction().getFunctionType();
10057 
10058   Optional<unsigned> FirstMaskArgument;
10059   if (Subtarget.hasVInstructions())
10060     FirstMaskArgument = preAssignMask(Ins);
10061 
10062   for (unsigned i = 0; i != NumArgs; ++i) {
10063     MVT ArgVT = Ins[i].VT;
10064     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10065 
10066     Type *ArgTy = nullptr;
10067     if (IsRet)
10068       ArgTy = FType->getReturnType();
10069     else if (Ins[i].isOrigArg())
10070       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10071 
10072     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10073     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10074            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10075            FirstMaskArgument)) {
10076       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10077                         << EVT(ArgVT).getEVTString() << '\n');
10078       llvm_unreachable(nullptr);
10079     }
10080   }
10081 }
10082 
10083 void RISCVTargetLowering::analyzeOutputArgs(
10084     MachineFunction &MF, CCState &CCInfo,
10085     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10086     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10087   unsigned NumArgs = Outs.size();
10088 
10089   Optional<unsigned> FirstMaskArgument;
10090   if (Subtarget.hasVInstructions())
10091     FirstMaskArgument = preAssignMask(Outs);
10092 
10093   for (unsigned i = 0; i != NumArgs; i++) {
10094     MVT ArgVT = Outs[i].VT;
10095     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10096     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10097 
10098     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10099     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10100            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10101            FirstMaskArgument)) {
10102       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10103                         << EVT(ArgVT).getEVTString() << "\n");
10104       llvm_unreachable(nullptr);
10105     }
10106   }
10107 }
10108 
// Convert Val to ValVT. Should not be called for CCValAssign::Indirect
10110 // values.
10111 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10112                                    const CCValAssign &VA, const SDLoc &DL,
10113                                    const RISCVSubtarget &Subtarget) {
10114   switch (VA.getLocInfo()) {
10115   default:
10116     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10117   case CCValAssign::Full:
10118     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10119       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10120     break;
10121   case CCValAssign::BCvt:
10122     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10123       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10124     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10125       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10126     else
10127       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10128     break;
10129   }
10130   return Val;
10131 }
10132 
10133 // The caller is responsible for loading the full value if the argument is
10134 // passed with CCValAssign::Indirect.
10135 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10136                                 const CCValAssign &VA, const SDLoc &DL,
10137                                 const RISCVTargetLowering &TLI) {
10138   MachineFunction &MF = DAG.getMachineFunction();
10139   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10140   EVT LocVT = VA.getLocVT();
10141   SDValue Val;
10142   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10143   Register VReg = RegInfo.createVirtualRegister(RC);
10144   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10145   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10146 
10147   if (VA.getLocInfo() == CCValAssign::Indirect)
10148     return Val;
10149 
10150   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10151 }
10152 
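// Convert Val from its ValVT to the LocVT required by the calling convention.
// This is the inverse of convertLocVTToValVT above.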
10153 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10154                                    const CCValAssign &VA, const SDLoc &DL,
10155                                    const RISCVSubtarget &Subtarget) {
10156   EVT LocVT = VA.getLocVT();
10157 
10158   switch (VA.getLocInfo()) {
10159   default:
10160     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10161   case CCValAssign::Full:
10162     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10163       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10164     break;
10165   case CCValAssign::BCvt:
10166     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10167       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10168     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10169       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10170     else
10171       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10172     break;
10173   }
10174   return Val;
10175 }
10176 
10177 // The caller is responsible for loading the full value if the argument is
10178 // passed with CCValAssign::Indirect.
10179 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10180                                 const CCValAssign &VA, const SDLoc &DL) {
10181   MachineFunction &MF = DAG.getMachineFunction();
10182   MachineFrameInfo &MFI = MF.getFrameInfo();
10183   EVT LocVT = VA.getLocVT();
10184   EVT ValVT = VA.getValVT();
10185   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10186   if (ValVT.isScalableVector()) {
    // When the value is a scalable vector, the stack slot holds a pointer to
    // the vector value rather than the vector itself, so use the pointer type
    // (LocVT) instead of the scalable vector type.
10190     ValVT = LocVT;
10191   }
10192   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10193                                  /*IsImmutable=*/true);
10194   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10195   SDValue Val;
10196 
10197   ISD::LoadExtType ExtType;
10198   switch (VA.getLocInfo()) {
10199   default:
10200     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10201   case CCValAssign::Full:
10202   case CCValAssign::Indirect:
10203   case CCValAssign::BCvt:
10204     ExtType = ISD::NON_EXTLOAD;
10205     break;
10206   }
10207   Val = DAG.getExtLoad(
10208       ExtType, DL, LocVT, Chain, FIN,
10209       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10210   return Val;
10211 }
10212 
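// Unpack an f64 argument passed under the soft-float RV32D ABI. The value is
// either passed entirely on the stack, or split into two i32 halves: the low
// half is always in a GPR, and the high half is either in the following GPR
// or, when the low half landed in X17 (the last argument register), on the
// stack.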
10213 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10214                                        const CCValAssign &VA, const SDLoc &DL) {
10215   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10216          "Unexpected VA");
10217   MachineFunction &MF = DAG.getMachineFunction();
10218   MachineFrameInfo &MFI = MF.getFrameInfo();
10219   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10220 
10221   if (VA.isMemLoc()) {
10222     // f64 is passed on the stack.
10223     int FI =
10224         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10225     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10226     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10227                        MachinePointerInfo::getFixedStack(MF, FI));
10228   }
10229 
10230   assert(VA.isRegLoc() && "Expected register VA assignment");
10231 
10232   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10233   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10234   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10235   SDValue Hi;
10236   if (VA.getLocReg() == RISCV::X17) {
10237     // Second half of f64 is passed on the stack.
10238     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10239     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10240     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10241                      MachinePointerInfo::getFixedStack(MF, FI));
10242   } else {
10243     // Second half of f64 is passed in another GPR.
10244     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10245     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10246     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10247   }
10248   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10249 }
10250 
// FastCC shows less than a 1% performance improvement on some particular
// benchmarks, but in theory it may benefit other cases.
10253 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10254                             unsigned ValNo, MVT ValVT, MVT LocVT,
10255                             CCValAssign::LocInfo LocInfo,
10256                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10257                             bool IsFixed, bool IsRet, Type *OrigTy,
10258                             const RISCVTargetLowering &TLI,
10259                             Optional<unsigned> FirstMaskArgument) {
10260 
  // X5 and X6 might be used for the save-restore libcall, so they are omitted
  // from this list.
10262   static const MCPhysReg GPRList[] = {
10263       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10264       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10265       RISCV::X29, RISCV::X30, RISCV::X31};
10266 
10267   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10268     if (unsigned Reg = State.AllocateReg(GPRList)) {
10269       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10270       return false;
10271     }
10272   }
10273 
10274   if (LocVT == MVT::f16) {
10275     static const MCPhysReg FPR16List[] = {
10276         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10277         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10278         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10279         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10280     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10281       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10282       return false;
10283     }
10284   }
10285 
10286   if (LocVT == MVT::f32) {
10287     static const MCPhysReg FPR32List[] = {
10288         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10289         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10290         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10291         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10292     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10293       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10294       return false;
10295     }
10296   }
10297 
10298   if (LocVT == MVT::f64) {
10299     static const MCPhysReg FPR64List[] = {
10300         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10301         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10302         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10303         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10304     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10305       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10306       return false;
10307     }
10308   }
10309 
10310   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10311     unsigned Offset4 = State.AllocateStack(4, Align(4));
10312     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10313     return false;
10314   }
10315 
10316   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10317     unsigned Offset5 = State.AllocateStack(8, Align(8));
10318     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10319     return false;
10320   }
10321 
10322   if (LocVT.isVector()) {
10323     if (unsigned Reg =
10324             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10325       // Fixed-length vectors are located in the corresponding scalable-vector
10326       // container types.
10327       if (ValVT.isFixedLengthVector())
10328         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10329       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10330     } else {
      // Try to pass the address via a "fast" GPR.
10332       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10333         LocInfo = CCValAssign::Indirect;
10334         LocVT = TLI.getSubtarget().getXLenVT();
10335         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10336       } else if (ValVT.isFixedLengthVector()) {
10337         auto StackAlign =
10338             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10339         unsigned StackOffset =
10340             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10341         State.addLoc(
10342             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10343       } else {
10344         // Can't pass scalable vectors on the stack.
10345         return true;
10346       }
10347     }
10348 
10349     return false;
10350   }
10351 
10352   return true; // CC didn't match.
10353 }
10354 
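// Calling convention for the GHC runtime. Every value must be passed in one
// of a fixed set of STG registers; there is no stack fallback, so running out
// of registers is a fatal error.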
10355 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10356                          CCValAssign::LocInfo LocInfo,
10357                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10358 
10359   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10360     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10361     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
    static const MCPhysReg GPRList[] = {
        RISCV::X9,  RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
        RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10365     if (unsigned Reg = State.AllocateReg(GPRList)) {
10366       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10367       return false;
10368     }
10369   }
10370 
10371   if (LocVT == MVT::f32) {
10372     // Pass in STG registers: F1, ..., F6
10373     //                        fs0 ... fs5
10374     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10375                                           RISCV::F18_F, RISCV::F19_F,
10376                                           RISCV::F20_F, RISCV::F21_F};
10377     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10378       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10379       return false;
10380     }
10381   }
10382 
10383   if (LocVT == MVT::f64) {
10384     // Pass in STG registers: D1, ..., D6
10385     //                        fs6 ... fs11
10386     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10387                                           RISCV::F24_D, RISCV::F25_D,
10388                                           RISCV::F26_D, RISCV::F27_D};
10389     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10390       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10391       return false;
10392     }
10393   }
10394 
10395   report_fatal_error("No registers left in GHC calling convention");
10396   return true;
10397 }
10398 
10399 // Transform physical registers into virtual registers.
10400 SDValue RISCVTargetLowering::LowerFormalArguments(
10401     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10402     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10403     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10404 
10405   MachineFunction &MF = DAG.getMachineFunction();
10406 
10407   switch (CallConv) {
10408   default:
10409     report_fatal_error("Unsupported calling convention");
10410   case CallingConv::C:
10411   case CallingConv::Fast:
10412     break;
10413   case CallingConv::GHC:
10414     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10415         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10416       report_fatal_error(
10417         "GHC calling convention requires the F and D instruction set extensions");
10418   }
10419 
10420   const Function &Func = MF.getFunction();
10421   if (Func.hasFnAttribute("interrupt")) {
10422     if (!Func.arg_empty())
10423       report_fatal_error(
10424         "Functions with the interrupt attribute cannot have arguments!");
10425 
10426     StringRef Kind =
10427       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10428 
10429     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10430       report_fatal_error(
10431         "Function interrupt attribute argument not supported!");
10432   }
10433 
10434   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10435   MVT XLenVT = Subtarget.getXLenVT();
10436   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
10438   std::vector<SDValue> OutChains;
10439 
10440   // Assign locations to all of the incoming arguments.
10441   SmallVector<CCValAssign, 16> ArgLocs;
10442   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10443 
10444   if (CallConv == CallingConv::GHC)
10445     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10446   else
10447     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10448                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10449                                                    : CC_RISCV);
10450 
10451   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10452     CCValAssign &VA = ArgLocs[i];
10453     SDValue ArgValue;
10454     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10455     // case.
10456     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10457       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10458     else if (VA.isRegLoc())
10459       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10460     else
10461       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10462 
10463     if (VA.getLocInfo() == CCValAssign::Indirect) {
10464       // If the original argument was split and passed by reference (e.g. i128
10465       // on RV32), we need to load all parts of it here (using the same
10466       // address). Vectors may be partly split to registers and partly to the
10467       // stack, in which case the base address is partly offset and subsequent
      // loads are relative to that.
10469       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10470                                    MachinePointerInfo()));
10471       unsigned ArgIndex = Ins[i].OrigArgIndex;
10472       unsigned ArgPartOffset = Ins[i].PartOffset;
10473       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10474       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10475         CCValAssign &PartVA = ArgLocs[i + 1];
10476         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10477         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10478         if (PartVA.getValVT().isScalableVector())
10479           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10480         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10481         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10482                                      MachinePointerInfo()));
10483         ++i;
10484       }
10485       continue;
10486     }
10487     InVals.push_back(ArgValue);
10488   }
10489 
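  // For vararg functions, spill the argument registers that were not used for
  // named arguments into a save area on the stack, so that the variadic
  // arguments can be walked contiguously together with any passed on the
  // stack.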
10490   if (IsVarArg) {
10491     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10492     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10493     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10494     MachineFrameInfo &MFI = MF.getFrameInfo();
10495     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10496     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10497 
10498     // Offset of the first variable argument from stack pointer, and size of
10499     // the vararg save area. For now, the varargs save area is either zero or
10500     // large enough to hold a0-a7.
10501     int VaArgOffset, VarArgsSaveSize;
10502 
10503     // If all registers are allocated, then all varargs must be passed on the
10504     // stack and we don't need to save any argregs.
10505     if (ArgRegs.size() == Idx) {
10506       VaArgOffset = CCInfo.getNextStackOffset();
10507       VarArgsSaveSize = 0;
10508     } else {
10509       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10510       VaArgOffset = -VarArgsSaveSize;
10511     }
10512 
    // Record the frame index of the first variable argument, which is a value
    // needed by VASTART.
10515     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10516     RVFI->setVarArgsFrameIndex(FI);
10517 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
10521     if (Idx % 2) {
10522       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10523       VarArgsSaveSize += XLenInBytes;
10524     }
10525 
10526     // Copy the integer registers that may have been used for passing varargs
10527     // to the vararg save area.
10528     for (unsigned I = Idx; I < ArgRegs.size();
10529          ++I, VaArgOffset += XLenInBytes) {
10530       const Register Reg = RegInfo.createVirtualRegister(RC);
10531       RegInfo.addLiveIn(ArgRegs[I], Reg);
10532       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10533       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10534       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10535       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10536                                    MachinePointerInfo::getFixedStack(MF, FI));
10537       cast<StoreSDNode>(Store.getNode())
10538           ->getMemOperand()
10539           ->setValue((Value *)nullptr);
10540       OutChains.push_back(Store);
10541     }
10542     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10543   }
10544 
  // All stores are grouped into one node so that the sizes of Ins and InVals
  // can match. This only happens for vararg functions.
10547   if (!OutChains.empty()) {
10548     OutChains.push_back(Chain);
10549     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10550   }
10551 
10552   return Chain;
10553 }
10554 
10555 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10556 /// for tail call optimization.
10557 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10558 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10559     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10560     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10561 
10562   auto &Callee = CLI.Callee;
10563   auto CalleeCC = CLI.CallConv;
10564   auto &Outs = CLI.Outs;
10565   auto &Caller = MF.getFunction();
10566   auto CallerCC = Caller.getCallingConv();
10567 
10568   // Exception-handling functions need a special set of instructions to
10569   // indicate a return to the hardware. Tail-calling another function would
10570   // probably break this.
10571   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10572   // should be expanded as new function attributes are introduced.
10573   if (Caller.hasFnAttribute("interrupt"))
10574     return false;
10575 
10576   // Do not tail call opt if the stack is used to pass parameters.
10577   if (CCInfo.getNextStackOffset() != 0)
10578     return false;
10579 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register, or,
  // if no register is available, on the stack. Passing indirectly usually
  // requires allocating stack space to store the value, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we must
  // also check whether any CCValAssign in ArgLocs is CCValAssign::Indirect.
10588   for (auto &VA : ArgLocs)
10589     if (VA.getLocInfo() == CCValAssign::Indirect)
10590       return false;
10591 
10592   // Do not tail call opt if either caller or callee uses struct return
10593   // semantics.
10594   auto IsCallerStructRet = Caller.hasStructRetAttr();
10595   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10596   if (IsCallerStructRet || IsCalleeStructRet)
10597     return false;
10598 
10599   // Externally-defined functions with weak linkage should not be
10600   // tail-called. The behaviour of branch instructions in this situation (as
10601   // used for tail calls) is implementation-defined, so we cannot rely on the
10602   // linker replacing the tail call with a return.
10603   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10604     const GlobalValue *GV = G->getGlobal();
10605     if (GV->hasExternalWeakLinkage())
10606       return false;
10607   }
10608 
10609   // The callee has to preserve all registers the caller needs to preserve.
10610   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10611   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10612   if (CalleeCC != CallerCC) {
10613     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10614     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10615       return false;
10616   }
10617 
10618   // Byval parameters hand the function a pointer directly into the stack area
10619   // we want to reuse during a tail call. Working around this *is* possible
10620   // but less efficient and uglier in LowerCall.
10621   for (auto &Arg : Outs)
10622     if (Arg.Flags.isByVal())
10623       return false;
10624 
10625   return true;
10626 }
10627 
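// Return the preferred alignment of the IR type corresponding to VT, per the
// module's data layout.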
10628 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10629   return DAG.getDataLayout().getPrefTypeAlign(
10630       VT.getTypeForEVT(*DAG.getContext()));
10631 }
10632 
10633 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10634 // and output parameter nodes.
10635 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10636                                        SmallVectorImpl<SDValue> &InVals) const {
10637   SelectionDAG &DAG = CLI.DAG;
10638   SDLoc &DL = CLI.DL;
10639   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10640   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10641   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10642   SDValue Chain = CLI.Chain;
10643   SDValue Callee = CLI.Callee;
10644   bool &IsTailCall = CLI.IsTailCall;
10645   CallingConv::ID CallConv = CLI.CallConv;
10646   bool IsVarArg = CLI.IsVarArg;
10647   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10648   MVT XLenVT = Subtarget.getXLenVT();
10649 
10650   MachineFunction &MF = DAG.getMachineFunction();
10651 
10652   // Analyze the operands of the call, assigning locations to each operand.
10653   SmallVector<CCValAssign, 16> ArgLocs;
10654   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10655 
10656   if (CallConv == CallingConv::GHC)
10657     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10658   else
10659     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10660                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10661                                                     : CC_RISCV);
10662 
10663   // Check if it's really possible to do a tail call.
10664   if (IsTailCall)
10665     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10666 
10667   if (IsTailCall)
10668     ++NumTailCalls;
10669   else if (CLI.CB && CLI.CB->isMustTailCall())
10670     report_fatal_error("failed to perform tail call elimination on a call "
10671                        "site marked musttail");
10672 
10673   // Get a count of how many bytes are to be pushed on the stack.
10674   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10675 
10676   // Create local copies for byval args
10677   SmallVector<SDValue, 8> ByValArgs;
10678   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10679     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10680     if (!Flags.isByVal())
10681       continue;
10682 
10683     SDValue Arg = OutVals[i];
10684     unsigned Size = Flags.getByValSize();
10685     Align Alignment = Flags.getNonZeroByValAlign();
10686 
10687     int FI =
10688         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10689     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10690     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10691 
10692     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10693                           /*IsVolatile=*/false,
10694                           /*AlwaysInline=*/false, IsTailCall,
10695                           MachinePointerInfo(), MachinePointerInfo());
10696     ByValArgs.push_back(FIPtr);
10697   }
10698 
10699   if (!IsTailCall)
10700     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10701 
10702   // Copy argument values to their designated locations.
10703   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10704   SmallVector<SDValue, 8> MemOpChains;
10705   SDValue StackPtr;
10706   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10707     CCValAssign &VA = ArgLocs[i];
10708     SDValue ArgValue = OutVals[i];
10709     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10710 
10711     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10712     bool IsF64OnRV32DSoftABI =
10713         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10714     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10715       SDValue SplitF64 = DAG.getNode(
10716           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10717       SDValue Lo = SplitF64.getValue(0);
10718       SDValue Hi = SplitF64.getValue(1);
10719 
10720       Register RegLo = VA.getLocReg();
10721       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10722 
10723       if (RegLo == RISCV::X17) {
10724         // Second half of f64 is passed on the stack.
10725         // Work out the address of the stack slot.
10726         if (!StackPtr.getNode())
10727           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10728         // Emit the store.
10729         MemOpChains.push_back(
10730             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10731       } else {
10732         // Second half of f64 is passed in another GPR.
10733         assert(RegLo < RISCV::X31 && "Invalid register pair");
10734         Register RegHigh = RegLo + 1;
10735         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10736       }
10737       continue;
10738     }
10739 
10740     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10741     // as any other MemLoc.
10742 
10743     // Promote the value if needed.
10744     // For now, only handle fully promoted and indirect arguments.
10745     if (VA.getLocInfo() == CCValAssign::Indirect) {
10746       // Store the argument in a stack slot and pass its address.
10747       Align StackAlign =
10748           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10749                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10750       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10751       // If the original argument was split (e.g. i128), we need
10752       // to store the required parts of it here (and pass just one address).
10753       // Vectors may be partly split to registers and partly to the stack, in
10754       // which case the base address is partly offset and subsequent stores are
10755       // relative to that.
10756       unsigned ArgIndex = Outs[i].OrigArgIndex;
10757       unsigned ArgPartOffset = Outs[i].PartOffset;
10758       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. The only way to know what we are
      // actually storing is to perform the loop below and collect the info.
10762       SmallVector<std::pair<SDValue, SDValue>> Parts;
10763       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10764         SDValue PartValue = OutVals[i + 1];
10765         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10766         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10767         EVT PartVT = PartValue.getValueType();
10768         if (PartVT.isScalableVector())
10769           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10770         StoredSize += PartVT.getStoreSize();
10771         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10772         Parts.push_back(std::make_pair(PartValue, Offset));
10773         ++i;
10774       }
10775       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10776       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10777       MemOpChains.push_back(
10778           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10779                        MachinePointerInfo::getFixedStack(MF, FI)));
10780       for (const auto &Part : Parts) {
10781         SDValue PartValue = Part.first;
10782         SDValue PartOffset = Part.second;
10783         SDValue Address =
10784             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10785         MemOpChains.push_back(
10786             DAG.getStore(Chain, DL, PartValue, Address,
10787                          MachinePointerInfo::getFixedStack(MF, FI)));
10788       }
10789       ArgValue = SpillSlot;
10790     } else {
10791       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10792     }
10793 
10794     // Use local copy if it is a byval arg.
10795     if (Flags.isByVal())
10796       ArgValue = ByValArgs[j++];
10797 
10798     if (VA.isRegLoc()) {
10799       // Queue up the argument copies and emit them at the end.
10800       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10801     } else {
10802       assert(VA.isMemLoc() && "Argument not register or memory");
10803       assert(!IsTailCall && "Tail call not allowed if stack is used "
10804                             "for passing parameters");
10805 
10806       // Work out the address of the stack slot.
10807       if (!StackPtr.getNode())
10808         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10809       SDValue Address =
10810           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10811                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10812 
10813       // Emit the store.
10814       MemOpChains.push_back(
10815           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10816     }
10817   }
10818 
10819   // Join the stores, which are independent of one another.
10820   if (!MemOpChains.empty())
10821     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10822 
10823   SDValue Glue;
10824 
10825   // Build a sequence of copy-to-reg nodes, chained and glued together.
10826   for (auto &Reg : RegsToPass) {
10827     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10828     Glue = Chain.getValue(1);
10829   }
10830 
  // Validate that none of the argument registers have been marked as
  // reserved; if any has, report an error. Do the same for the return address
  // if this is not a tail call.
10834   validateCCReservedRegs(RegsToPass, MF);
10835   if (!IsTailCall &&
10836       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10837     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10838         MF.getFunction(),
10839         "Return address register required, but has been reserved."});
10840 
10841   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10842   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10843   // split it and then direct call can be matched by PseudoCALL.
10844   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10845     const GlobalValue *GV = S->getGlobal();
10846 
10847     unsigned OpFlags = RISCVII::MO_CALL;
10848     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10849       OpFlags = RISCVII::MO_PLT;
10850 
10851     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10852   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10853     unsigned OpFlags = RISCVII::MO_CALL;
10854 
10855     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10856                                                  nullptr))
10857       OpFlags = RISCVII::MO_PLT;
10858 
10859     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10860   }
10861 
10862   // The first call operand is the chain and the second is the target address.
10863   SmallVector<SDValue, 8> Ops;
10864   Ops.push_back(Chain);
10865   Ops.push_back(Callee);
10866 
10867   // Add argument registers to the end of the list so that they are
10868   // known live into the call.
10869   for (auto &Reg : RegsToPass)
10870     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10871 
10872   if (!IsTailCall) {
10873     // Add a register mask operand representing the call-preserved registers.
10874     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10875     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10876     assert(Mask && "Missing call preserved mask for calling convention");
10877     Ops.push_back(DAG.getRegisterMask(Mask));
10878   }
10879 
10880   // Glue the call to the argument copies, if any.
10881   if (Glue.getNode())
10882     Ops.push_back(Glue);
10883 
10884   // Emit the call.
10885   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10886 
10887   if (IsTailCall) {
10888     MF.getFrameInfo().setHasTailCall();
10889     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10890   }
10891 
10892   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10893   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10894   Glue = Chain.getValue(1);
10895 
10896   // Mark the end of the call, which is glued to the call itself.
10897   Chain = DAG.getCALLSEQ_END(Chain,
10898                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10899                              DAG.getConstant(0, DL, PtrVT, true),
10900                              Glue, DL);
10901   Glue = Chain.getValue(1);
10902 
10903   // Assign locations to each value returned by this call.
10904   SmallVector<CCValAssign, 16> RVLocs;
10905   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10906   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10907 
10908   // Copy all of the result registers out of their specified physreg.
10909   for (auto &VA : RVLocs) {
10910     // Copy the value out
10911     SDValue RetValue =
10912         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10913     // Glue the RetValue to the end of the call sequence
10914     Chain = RetValue.getValue(1);
10915     Glue = RetValue.getValue(2);
10916 
10917     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10918       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10919       SDValue RetValue2 =
10920           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10921       Chain = RetValue2.getValue(1);
10922       Glue = RetValue2.getValue(2);
10923       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10924                              RetValue2);
10925     }
10926 
10927     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10928 
10929     InVals.push_back(RetValue);
10930   }
10931 
10932   return Chain;
10933 }
10934 
10935 bool RISCVTargetLowering::CanLowerReturn(
10936     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10937     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10938   SmallVector<CCValAssign, 16> RVLocs;
10939   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10940 
10941   Optional<unsigned> FirstMaskArgument;
10942   if (Subtarget.hasVInstructions())
10943     FirstMaskArgument = preAssignMask(Outs);
10944 
10945   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10946     MVT VT = Outs[i].VT;
10947     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10948     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10949     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10950                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10951                  *this, FirstMaskArgument))
10952       return false;
10953   }
10954   return true;
10955 }
10956 
10957 SDValue
10958 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10959                                  bool IsVarArg,
10960                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10961                                  const SmallVectorImpl<SDValue> &OutVals,
10962                                  const SDLoc &DL, SelectionDAG &DAG) const {
10963   const MachineFunction &MF = DAG.getMachineFunction();
10964   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10965 
10966   // Stores the assignment of the return value to a location.
10967   SmallVector<CCValAssign, 16> RVLocs;
10968 
10969   // Info about the registers and stack slot.
10970   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10971                  *DAG.getContext());
10972 
10973   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10974                     nullptr, CC_RISCV);
10975 
10976   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10977     report_fatal_error("GHC functions return void only");
10978 
10979   SDValue Glue;
10980   SmallVector<SDValue, 4> RetOps(1, Chain);
10981 
10982   // Copy the result values into the output registers.
10983   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10984     SDValue Val = OutVals[i];
10985     CCValAssign &VA = RVLocs[i];
10986     assert(VA.isRegLoc() && "Can only return in registers!");
10987 
10988     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10989       // Handle returning f64 on RV32D with a soft float ABI.
10990       assert(VA.isRegLoc() && "Expected return via registers");
10991       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10992                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10993       SDValue Lo = SplitF64.getValue(0);
10994       SDValue Hi = SplitF64.getValue(1);
10995       Register RegLo = VA.getLocReg();
10996       assert(RegLo < RISCV::X31 && "Invalid register pair");
10997       Register RegHi = RegLo + 1;
10998 
10999       if (STI.isRegisterReservedByUser(RegLo) ||
11000           STI.isRegisterReservedByUser(RegHi))
11001         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11002             MF.getFunction(),
11003             "Return value register required, but has been reserved."});
11004 
11005       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
11006       Glue = Chain.getValue(1);
11007       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
11008       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
11009       Glue = Chain.getValue(1);
11010       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
11011     } else {
11012       // Handle a 'normal' return.
11013       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
11014       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
11015 
11016       if (STI.isRegisterReservedByUser(VA.getLocReg()))
11017         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11018             MF.getFunction(),
11019             "Return value register required, but has been reserved."});
11020 
11021       // Guarantee that all emitted copies are stuck together.
11022       Glue = Chain.getValue(1);
11023       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
11024     }
11025   }
11026 
11027   RetOps[0] = Chain; // Update chain.
11028 
11029   // Add the glue node if we have it.
11030   if (Glue.getNode()) {
11031     RetOps.push_back(Glue);
11032   }
11033 
11034   unsigned RetOpc = RISCVISD::RET_FLAG;
11035   // Interrupt service routines use different return instructions.
11036   const Function &Func = DAG.getMachineFunction().getFunction();
11037   if (Func.hasFnAttribute("interrupt")) {
11038     if (!Func.getReturnType()->isVoidTy())
11039       report_fatal_error(
11040           "Functions with the interrupt attribute must have void return type!");
11041 
11042     MachineFunction &MF = DAG.getMachineFunction();
11043     StringRef Kind =
11044       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
11045 
11046     if (Kind == "user")
11047       RetOpc = RISCVISD::URET_FLAG;
11048     else if (Kind == "supervisor")
11049       RetOpc = RISCVISD::SRET_FLAG;
11050     else
11051       RetOpc = RISCVISD::MRET_FLAG;
11052   }
11053 
11054   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
11055 }
11056 
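// Diagnose an attempt to pass arguments in registers that the user has
// reserved, since reserved registers cannot be used for argument passing.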
11057 void RISCVTargetLowering::validateCCReservedRegs(
11058     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11059     MachineFunction &MF) const {
11060   const Function &F = MF.getFunction();
11061   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11062 
11063   if (llvm::any_of(Regs, [&STI](auto Reg) {
11064         return STI.isRegisterReservedByUser(Reg.first);
11065       }))
11066     F.getContext().diagnose(DiagnosticInfoUnsupported{
11067         F, "Argument register required, but has been reserved."});
11068 }
11069 
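// Any call marked `tail` in the IR is a candidate; the detailed eligibility
// checks are performed later by isEligibleForTailCallOptimization.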
11070 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11071   return CI->isTailCall();
11072 }
11073 
11074 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11075 #define NODE_NAME_CASE(NODE)                                                   \
11076   case RISCVISD::NODE:                                                         \
11077     return "RISCVISD::" #NODE;
11078   // clang-format off
11079   switch ((RISCVISD::NodeType)Opcode) {
11080   case RISCVISD::FIRST_NUMBER:
11081     break;
11082   NODE_NAME_CASE(RET_FLAG)
11083   NODE_NAME_CASE(URET_FLAG)
11084   NODE_NAME_CASE(SRET_FLAG)
11085   NODE_NAME_CASE(MRET_FLAG)
11086   NODE_NAME_CASE(CALL)
11087   NODE_NAME_CASE(SELECT_CC)
11088   NODE_NAME_CASE(BR_CC)
11089   NODE_NAME_CASE(BuildPairF64)
11090   NODE_NAME_CASE(SplitF64)
11091   NODE_NAME_CASE(TAIL)
11092   NODE_NAME_CASE(MULHSU)
11093   NODE_NAME_CASE(SLLW)
11094   NODE_NAME_CASE(SRAW)
11095   NODE_NAME_CASE(SRLW)
11096   NODE_NAME_CASE(DIVW)
11097   NODE_NAME_CASE(DIVUW)
11098   NODE_NAME_CASE(REMUW)
11099   NODE_NAME_CASE(ROLW)
11100   NODE_NAME_CASE(RORW)
11101   NODE_NAME_CASE(CLZW)
11102   NODE_NAME_CASE(CTZW)
11103   NODE_NAME_CASE(FSLW)
11104   NODE_NAME_CASE(FSRW)
11105   NODE_NAME_CASE(FSL)
11106   NODE_NAME_CASE(FSR)
11107   NODE_NAME_CASE(FMV_H_X)
11108   NODE_NAME_CASE(FMV_X_ANYEXTH)
11109   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11110   NODE_NAME_CASE(FMV_W_X_RV64)
11111   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11112   NODE_NAME_CASE(FCVT_X)
11113   NODE_NAME_CASE(FCVT_XU)
11114   NODE_NAME_CASE(FCVT_W_RV64)
11115   NODE_NAME_CASE(FCVT_WU_RV64)
11116   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11117   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11118   NODE_NAME_CASE(READ_CYCLE_WIDE)
11119   NODE_NAME_CASE(GREV)
11120   NODE_NAME_CASE(GREVW)
11121   NODE_NAME_CASE(GORC)
11122   NODE_NAME_CASE(GORCW)
11123   NODE_NAME_CASE(SHFL)
11124   NODE_NAME_CASE(SHFLW)
11125   NODE_NAME_CASE(UNSHFL)
11126   NODE_NAME_CASE(UNSHFLW)
11127   NODE_NAME_CASE(BFP)
11128   NODE_NAME_CASE(BFPW)
11129   NODE_NAME_CASE(BCOMPRESS)
11130   NODE_NAME_CASE(BCOMPRESSW)
11131   NODE_NAME_CASE(BDECOMPRESS)
11132   NODE_NAME_CASE(BDECOMPRESSW)
11133   NODE_NAME_CASE(VMV_V_X_VL)
11134   NODE_NAME_CASE(VFMV_V_F_VL)
11135   NODE_NAME_CASE(VMV_X_S)
11136   NODE_NAME_CASE(VMV_S_X_VL)
11137   NODE_NAME_CASE(VFMV_S_F_VL)
11138   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11139   NODE_NAME_CASE(READ_VLENB)
11140   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11141   NODE_NAME_CASE(VSLIDEUP_VL)
11142   NODE_NAME_CASE(VSLIDE1UP_VL)
11143   NODE_NAME_CASE(VSLIDEDOWN_VL)
11144   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11145   NODE_NAME_CASE(VID_VL)
11146   NODE_NAME_CASE(VFNCVT_ROD_VL)
11147   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11148   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11149   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11150   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11151   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11152   NODE_NAME_CASE(VECREDUCE_AND_VL)
11153   NODE_NAME_CASE(VECREDUCE_OR_VL)
11154   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11155   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11156   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11157   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11158   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11159   NODE_NAME_CASE(ADD_VL)
11160   NODE_NAME_CASE(AND_VL)
11161   NODE_NAME_CASE(MUL_VL)
11162   NODE_NAME_CASE(OR_VL)
11163   NODE_NAME_CASE(SDIV_VL)
11164   NODE_NAME_CASE(SHL_VL)
11165   NODE_NAME_CASE(SREM_VL)
11166   NODE_NAME_CASE(SRA_VL)
11167   NODE_NAME_CASE(SRL_VL)
11168   NODE_NAME_CASE(SUB_VL)
11169   NODE_NAME_CASE(UDIV_VL)
11170   NODE_NAME_CASE(UREM_VL)
11171   NODE_NAME_CASE(XOR_VL)
11172   NODE_NAME_CASE(SADDSAT_VL)
11173   NODE_NAME_CASE(UADDSAT_VL)
11174   NODE_NAME_CASE(SSUBSAT_VL)
11175   NODE_NAME_CASE(USUBSAT_VL)
11176   NODE_NAME_CASE(FADD_VL)
11177   NODE_NAME_CASE(FSUB_VL)
11178   NODE_NAME_CASE(FMUL_VL)
11179   NODE_NAME_CASE(FDIV_VL)
11180   NODE_NAME_CASE(FNEG_VL)
11181   NODE_NAME_CASE(FABS_VL)
11182   NODE_NAME_CASE(FSQRT_VL)
11183   NODE_NAME_CASE(FMA_VL)
11184   NODE_NAME_CASE(FCOPYSIGN_VL)
11185   NODE_NAME_CASE(SMIN_VL)
11186   NODE_NAME_CASE(SMAX_VL)
11187   NODE_NAME_CASE(UMIN_VL)
11188   NODE_NAME_CASE(UMAX_VL)
11189   NODE_NAME_CASE(FMINNUM_VL)
11190   NODE_NAME_CASE(FMAXNUM_VL)
11191   NODE_NAME_CASE(MULHS_VL)
11192   NODE_NAME_CASE(MULHU_VL)
11193   NODE_NAME_CASE(FP_TO_SINT_VL)
11194   NODE_NAME_CASE(FP_TO_UINT_VL)
11195   NODE_NAME_CASE(SINT_TO_FP_VL)
11196   NODE_NAME_CASE(UINT_TO_FP_VL)
11197   NODE_NAME_CASE(FP_EXTEND_VL)
11198   NODE_NAME_CASE(FP_ROUND_VL)
11199   NODE_NAME_CASE(VWMUL_VL)
11200   NODE_NAME_CASE(VWMULU_VL)
11201   NODE_NAME_CASE(VWMULSU_VL)
11202   NODE_NAME_CASE(VWADD_VL)
11203   NODE_NAME_CASE(VWADDU_VL)
11204   NODE_NAME_CASE(VWSUB_VL)
11205   NODE_NAME_CASE(VWSUBU_VL)
11206   NODE_NAME_CASE(VWADD_W_VL)
11207   NODE_NAME_CASE(VWADDU_W_VL)
11208   NODE_NAME_CASE(VWSUB_W_VL)
11209   NODE_NAME_CASE(VWSUBU_W_VL)
11210   NODE_NAME_CASE(SETCC_VL)
11211   NODE_NAME_CASE(VSELECT_VL)
11212   NODE_NAME_CASE(VP_MERGE_VL)
11213   NODE_NAME_CASE(VMAND_VL)
11214   NODE_NAME_CASE(VMOR_VL)
11215   NODE_NAME_CASE(VMXOR_VL)
11216   NODE_NAME_CASE(VMCLR_VL)
11217   NODE_NAME_CASE(VMSET_VL)
11218   NODE_NAME_CASE(VRGATHER_VX_VL)
11219   NODE_NAME_CASE(VRGATHER_VV_VL)
11220   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11221   NODE_NAME_CASE(VSEXT_VL)
11222   NODE_NAME_CASE(VZEXT_VL)
11223   NODE_NAME_CASE(VCPOP_VL)
11224   NODE_NAME_CASE(READ_CSR)
11225   NODE_NAME_CASE(WRITE_CSR)
11226   NODE_NAME_CASE(SWAP_CSR)
11227   }
11228   // clang-format on
11229   return nullptr;
11230 #undef NODE_NAME_CASE
11231 }
11232 
11233 /// getConstraintType - Given a constraint letter, return the type of
11234 /// constraint it is for this target.
11235 RISCVTargetLowering::ConstraintType
11236 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11237   if (Constraint.size() == 1) {
11238     switch (Constraint[0]) {
11239     default:
11240       break;
11241     case 'f':
11242       return C_RegisterClass;
11243     case 'I':
11244     case 'J':
11245     case 'K':
11246       return C_Immediate;
11247     case 'A':
11248       return C_Memory;
11249     case 'S': // A symbolic address
11250       return C_Other;
11251     }
11252   } else {
11253     if (Constraint == "vr" || Constraint == "vm")
11254       return C_RegisterClass;
11255   }
11256   return TargetLowering::getConstraintType(Constraint);
11257 }
11258 
11259 std::pair<unsigned, const TargetRegisterClass *>
11260 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11261                                                   StringRef Constraint,
11262                                                   MVT VT) const {
11263   // First, see if this is a constraint that directly corresponds to a
11264   // RISCV register class.
11265   if (Constraint.size() == 1) {
11266     switch (Constraint[0]) {
11267     case 'r':
11268       // TODO: Support fixed vectors up to XLen for P extension?
11269       if (VT.isVector())
11270         break;
11271       return std::make_pair(0U, &RISCV::GPRRegClass);
11272     case 'f':
11273       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11274         return std::make_pair(0U, &RISCV::FPR16RegClass);
11275       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11276         return std::make_pair(0U, &RISCV::FPR32RegClass);
11277       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11278         return std::make_pair(0U, &RISCV::FPR64RegClass);
11279       break;
11280     default:
11281       break;
11282     }
11283   } else if (Constraint == "vr") {
11284     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11285                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11286       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11287         return std::make_pair(0U, RC);
11288     }
11289   } else if (Constraint == "vm") {
11290     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11291       return std::make_pair(0U, &RISCV::VMV0RegClass);
11292   }
11293 
11294   // Clang will correctly decode the usage of register name aliases into their
11295   // official names. However, other frontends like `rustc` do not. This allows
11296   // users of these frontends to use the ABI names for registers in LLVM-style
11297   // register constraints.
11298   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11299                                .Case("{zero}", RISCV::X0)
11300                                .Case("{ra}", RISCV::X1)
11301                                .Case("{sp}", RISCV::X2)
11302                                .Case("{gp}", RISCV::X3)
11303                                .Case("{tp}", RISCV::X4)
11304                                .Case("{t0}", RISCV::X5)
11305                                .Case("{t1}", RISCV::X6)
11306                                .Case("{t2}", RISCV::X7)
11307                                .Cases("{s0}", "{fp}", RISCV::X8)
11308                                .Case("{s1}", RISCV::X9)
11309                                .Case("{a0}", RISCV::X10)
11310                                .Case("{a1}", RISCV::X11)
11311                                .Case("{a2}", RISCV::X12)
11312                                .Case("{a3}", RISCV::X13)
11313                                .Case("{a4}", RISCV::X14)
11314                                .Case("{a5}", RISCV::X15)
11315                                .Case("{a6}", RISCV::X16)
11316                                .Case("{a7}", RISCV::X17)
11317                                .Case("{s2}", RISCV::X18)
11318                                .Case("{s3}", RISCV::X19)
11319                                .Case("{s4}", RISCV::X20)
11320                                .Case("{s5}", RISCV::X21)
11321                                .Case("{s6}", RISCV::X22)
11322                                .Case("{s7}", RISCV::X23)
11323                                .Case("{s8}", RISCV::X24)
11324                                .Case("{s9}", RISCV::X25)
11325                                .Case("{s10}", RISCV::X26)
11326                                .Case("{s11}", RISCV::X27)
11327                                .Case("{t3}", RISCV::X28)
11328                                .Case("{t4}", RISCV::X29)
11329                                .Case("{t5}", RISCV::X30)
11330                                .Case("{t6}", RISCV::X31)
11331                                .Default(RISCV::NoRegister);
11332   if (XRegFromAlias != RISCV::NoRegister)
11333     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11334 
11335   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11336   // TableGen record rather than the AsmName to choose registers for InlineAsm
11337   // constraints, plus we want to match those names to the widest floating point
11338   // register type available, manually select floating point registers here.
11339   //
11340   // The second case is the ABI name of the register, so that frontends can also
11341   // use the ABI names in register constraint lists.
  if (Subtarget.hasStdExtF()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      if (VT == MVT::f32 || VT == MVT::Other)
        return std::make_pair(FReg, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned HReg = RISCV::F0_H + RegNo;
        return std::make_pair(HReg, &RISCV::FPR16RegClass);
      }
    }
  }

  if (Subtarget.hasVInstructions()) {
    Register VReg = StringSwitch<Register>(Constraint.lower())
                        .Case("{v0}", RISCV::V0)
                        .Case("{v1}", RISCV::V1)
                        .Case("{v2}", RISCV::V2)
                        .Case("{v3}", RISCV::V3)
                        .Case("{v4}", RISCV::V4)
                        .Case("{v5}", RISCV::V5)
                        .Case("{v6}", RISCV::V6)
                        .Case("{v7}", RISCV::V7)
                        .Case("{v8}", RISCV::V8)
                        .Case("{v9}", RISCV::V9)
                        .Case("{v10}", RISCV::V10)
                        .Case("{v11}", RISCV::V11)
                        .Case("{v12}", RISCV::V12)
                        .Case("{v13}", RISCV::V13)
                        .Case("{v14}", RISCV::V14)
                        .Case("{v15}", RISCV::V15)
                        .Case("{v16}", RISCV::V16)
                        .Case("{v17}", RISCV::V17)
                        .Case("{v18}", RISCV::V18)
                        .Case("{v19}", RISCV::V19)
                        .Case("{v20}", RISCV::V20)
                        .Case("{v21}", RISCV::V21)
                        .Case("{v22}", RISCV::V22)
                        .Case("{v23}", RISCV::V23)
                        .Case("{v24}", RISCV::V24)
                        .Case("{v25}", RISCV::V25)
                        .Case("{v26}", RISCV::V26)
                        .Case("{v27}", RISCV::V27)
                        .Case("{v28}", RISCV::V28)
                        .Case("{v29}", RISCV::V29)
                        .Case("{v30}", RISCV::V30)
                        .Case("{v31}", RISCV::V31)
                        .Default(RISCV::NoRegister);
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);
        }
      }
    }
  }

  std::pair<Register, const TargetRegisterClass *> Res =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // If we picked one of the Zfinx register classes, remap it to the GPR class.
  // FIXME: When Zfinx is supported in CodeGen this will need to take the
  // Subtarget into account.
  if (Res.second == &RISCV::GPRF16RegClass ||
      Res.second == &RISCV::GPRF32RegClass ||
      Res.second == &RISCV::GPRF64RegClass)
    return std::make_pair(Res.first, &RISCV::GPRRegClass);

  return Res;
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently we only support length 1 constraints.
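  //
  // 'A' denotes an address held in a general-purpose register, as taken by
  // the AMO and LR/SC instructions. An illustrative use from C (the variable
  // names are hypothetical):
  //   asm volatile ("amoadd.w %0, %2, %1"
  //                 : "=r"(old), "+A"(*counter)
  //                 : "r"(inc));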
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently we only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
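      // e.g. (illustrative) asm volatile ("addi %0, %1, %2"
      //                                   : "=r"(res) : "r"(src), "I"(-2048));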
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
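      // e.g. (illustrative) asm volatile ("csrci mstatus, %0" :: "K"(8));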
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'S':
      if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0)));
      } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                                BA->getValueType(0)));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
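  // Following the RISC-V atomics mapping, a seq_cst load takes a leading
  // fence rw,rw and a release (or stronger) store takes a leading fence rw,w;
  // e.g. a seq_cst load roughly lowers to (sketch):
  //   fence rw, rw
  //   lw    a0, 0(a1)
  //   fence r, rw       # added by emitTrailingFence below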
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

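  // Sub-word (i8/i16) atomics have no native AMO encoding, so expand them to
  // a masked LR.W/SC.W loop on the containing aligned word; e.g. (sketch) an
  // i8 "atomicrmw add" becomes a call to the llvm.riscv.masked.atomicrmw.add
  // intrinsic operating on the aligned i32 that contains the byte.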
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
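  // For example, with XLen = 32, an i8 at byte offset 2 has ShiftAmt = 16 and
  // ValWidth = 8, so SextShamt = 32 - 8 - 16 = 8: shifting the loaded word
  // left and then arithmetic-right by 8 sign-extends bits [23:16] in place.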
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
                                                        EVT DataVT) const {
  return false;
}

bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                               EVT VT) const {
  if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
    return false;

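  // The scalar fcvt instructions already clamp out-of-range inputs, so e.g.
  // (sketch) @llvm.fptosi.sat.i32.f32 can lower to fcvt.w.s plus a NaN check
  // to return zero.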
  switch (FPVT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    return false;
  }
}

unsigned RISCVTargetLowering::getJumpTableEncoding() const {
  // If we are using the small code model, we can reduce the size of each jump
  // table entry to 4 bytes.
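  // Each entry is then emitted as a 32-bit absolute symbol reference, e.g. a
  // ".word .LBB0_1" directive, via LowerCustomJumpTableEntry below.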
  if (Subtarget.is64Bit() && !isPositionIndependent() &&
      getTargetMachine().getCodeModel() == CodeModel::Small) {
    return MachineJumpTableInfo::EK_Custom32;
  }
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  assert(Subtarget.is64Bit() && !isPositionIndependent() &&
         getTargetMachine().getCodeModel() == CodeModel::Small);
  return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is f32 under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL to a SLLI and an ADD/SUB.
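      // e.g. mul x, 9 -> slli t, x, 3; add r, t, x (9 - 1 is a power of 2).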
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
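      // e.g. mul x, 4100 -> slli t, x, 12; sh2add r, x, t, since
      // 4100 = 4096 + 4 and sh2add computes (x << 2) + t.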
      if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
          ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
           (Imm - 8).isPowerOf2()))
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
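      // e.g. mul x, 6144 -> slli t, x, 1; add t, t, x; slli r, t, 11, since
      // 6144 = 3 << 11.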
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                      SDValue ConstNode) const {
  // Let the DAGCombiner decide for vectors.
  EVT VT = AddNode.getValueType();
  if (VT.isVector())
    return true;

  // Let the DAGCombiner decide for larger types.
  if (VT.getScalarSizeInBits() > Subtarget.getXLen())
    return true;

  // The transform is not profitable if C1 fits in simm12 while C1*C2 does not.
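  // e.g. for (x + 1) * 2048, C1 = 1 is simm12 but C1 * C2 = 2048 is not, so
  // folding the add through the mul would require materializing 2048
  // separately.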
  ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const APInt &C1 = C1Node->getAPIntValue();
  const APInt &C2 = C2Node->getAPIntValue();
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector()) {
    if (Fast)
      *Fast = false;
    return Subtarget.enableUnalignedScalarMem();
  }

  // All vector implementations must support accesses aligned to the element
  // size.
  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
    // and cast to f32.
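    // e.g. f16 1.0 (0x3C00) becomes the NaN-boxed f32 bit pattern 0xFFFF3C00.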
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      // If the element types are different, bitcast to the same element type
      // as PartVT first. For example, to copy a <vscale x 1 x i8> value into
      // <vscale x 4 x i16>, first widen <vscale x 1 x i8> to <vscale x 8 x i8>
      // with an insert subvector, then bitcast to <vscale x 4 x i16>.
      if (ValueEltVT != PartEltVT) {
        if (PartVTBitSize > ValueVTBitSize) {
          unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
          assert(Count != 0 && "The number of elements should not be zero.");
          EVT SameEltTypeVT =
              EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
          Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
                            DAG.getUNDEF(SameEltTypeVT), Val,
                            DAG.getVectorIdxConstant(0, DL));
        }
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
      } else {
        Val =
            DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getVectorIdxConstant(0, DL));
      }
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert to the element type of
      // PartVT first. For example, to extract a <vscale x 1 x i8> value from
      // <vscale x 4 x i16>, first bitcast <vscale x 4 x i16> to
      // <vscale x 8 x i8>, then extract <vscale x 1 x i8> as a subvector.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      return Val;
    }
  }
  return SDValue();
}

SDValue
RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                   SelectionDAG &DAG,
                                   SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
         "Unexpected divisor!");

  // Conditional move is needed, so do the transformation iff Zbt is enabled.
  if (!Subtarget.hasStdExtZbt())
    return SDValue();

  // When |Divisor| >= 2^12, the transformation is not profitable. Dividing by
  // 2 would also lengthen the critical path, so keep the original DAGs for
  // these cases.
  unsigned Lg2 = Divisor.countTrailingZeros();
  if (Lg2 == 1 || Lg2 >= 12)
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);

  // Add (N0 < 0) ? Pow2 - 1 : 0;
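  // e.g. for sdiv x, 8 (Lg2 = 3): t = (x < 0) ? x + 7 : x; res = t >> 3.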
  SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(Sel.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));

  // If we're dividing by a positive value, we're done. Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm