1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58               "doesn't support the F instruction set extension (ignoring "
59               "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
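  // The scalable vector types for which RVV register classes are added below,
  // grouped by element type. Each group runs from the narrowest supported
  // type up to LMUL=8, e.g. nxv1i32 through nxv16i32 for i32 elements.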
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
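    // Map a scalable vector type to a register class by its known minimum
    // size: up to 64 bits fits in a single vector register (LMUL <= 1), while
    // 128/256/512-bit types need groups of 2/4/8 registers. For example,
    // nxv4i32 (128 bits) uses VRM2 and nxv1i64 (64 bits) uses VR.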
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
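    // i32 is not a legal type on RV64, so these are handled via custom type
    // legalization. That lets them be selected to the *W instruction forms
    // (addw, sllw, ...), whose results are sign-extended from bit 31.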
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   } else {
203     setLibcallName(RTLIB::MUL_I128, nullptr);
204     setLibcallName(RTLIB::MULO_I64, nullptr);
205   }
206 
207   if (!Subtarget.hasStdExtM()) {
208     setOperationAction(ISD::MUL, XLenVT, Expand);
209     setOperationAction(ISD::MULHS, XLenVT, Expand);
210     setOperationAction(ISD::MULHU, XLenVT, Expand);
211     setOperationAction(ISD::SDIV, XLenVT, Expand);
212     setOperationAction(ISD::UDIV, XLenVT, Expand);
213     setOperationAction(ISD::SREM, XLenVT, Expand);
214     setOperationAction(ISD::UREM, XLenVT, Expand);
215   } else {
216     if (Subtarget.is64Bit()) {
217       setOperationAction(ISD::MUL, MVT::i32, Custom);
218       setOperationAction(ISD::MUL, MVT::i128, Custom);
219 
220       setOperationAction(ISD::SDIV, MVT::i8, Custom);
221       setOperationAction(ISD::UDIV, MVT::i8, Custom);
222       setOperationAction(ISD::UREM, MVT::i8, Custom);
223       setOperationAction(ISD::SDIV, MVT::i16, Custom);
224       setOperationAction(ISD::UDIV, MVT::i16, Custom);
225       setOperationAction(ISD::UREM, MVT::i16, Custom);
226       setOperationAction(ISD::SDIV, MVT::i32, Custom);
227       setOperationAction(ISD::UDIV, MVT::i32, Custom);
228       setOperationAction(ISD::UREM, MVT::i32, Custom);
229     } else {
230       setOperationAction(ISD::MUL, MVT::i64, Custom);
231     }
232   }
233 
234   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
235   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
236   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
237   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
238 
239   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
240   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
241   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
242 
243   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
244     if (Subtarget.is64Bit()) {
245       setOperationAction(ISD::ROTL, MVT::i32, Custom);
246       setOperationAction(ISD::ROTR, MVT::i32, Custom);
247     }
248   } else {
249     setOperationAction(ISD::ROTL, XLenVT, Expand);
250     setOperationAction(ISD::ROTR, XLenVT, Expand);
251   }
252 
253   if (Subtarget.hasStdExtZbp()) {
254     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
255     // more combining.
256     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
257     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
258     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
259     // BSWAP i8 doesn't exist.
260     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
261     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
262 
263     if (Subtarget.is64Bit()) {
264       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
265       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
266     }
267   } else {
268     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
269     // pattern match it directly in isel.
270     setOperationAction(ISD::BSWAP, XLenVT,
271                        Subtarget.hasStdExtZbb() ? Legal : Expand);
272   }
273 
274   if (Subtarget.hasStdExtZbb()) {
275     setOperationAction(ISD::SMIN, XLenVT, Legal);
276     setOperationAction(ISD::SMAX, XLenVT, Legal);
277     setOperationAction(ISD::UMIN, XLenVT, Legal);
278     setOperationAction(ISD::UMAX, XLenVT, Legal);
279 
280     if (Subtarget.is64Bit()) {
281       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
282       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
283       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
284       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
285     }
286   } else {
287     setOperationAction(ISD::CTTZ, XLenVT, Expand);
288     setOperationAction(ISD::CTLZ, XLenVT, Expand);
289     setOperationAction(ISD::CTPOP, XLenVT, Expand);
290   }
291 
292   if (Subtarget.hasStdExtZbt()) {
293     setOperationAction(ISD::FSHL, XLenVT, Custom);
294     setOperationAction(ISD::FSHR, XLenVT, Custom);
295     setOperationAction(ISD::SELECT, XLenVT, Legal);
296 
297     if (Subtarget.is64Bit()) {
298       setOperationAction(ISD::FSHL, MVT::i32, Custom);
299       setOperationAction(ISD::FSHR, MVT::i32, Custom);
300     }
301   } else {
302     setOperationAction(ISD::SELECT, XLenVT, Custom);
303   }
304 
305   ISD::CondCode FPCCToExpand[] = {
306       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
307       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
308       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
309 
310   ISD::NodeType FPOpToExpand[] = {
311       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
312       ISD::FP_TO_FP16};
313 
314   if (Subtarget.hasStdExtZfh())
315     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
316 
317   if (Subtarget.hasStdExtZfh()) {
318     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
319     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
320     setOperationAction(ISD::LRINT, MVT::f16, Legal);
321     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
322     setOperationAction(ISD::LROUND, MVT::f16, Legal);
323     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
324     for (auto CC : FPCCToExpand)
325       setCondCodeAction(CC, MVT::f16, Expand);
326     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
327     setOperationAction(ISD::SELECT, MVT::f16, Custom);
328     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
329     for (auto Op : FPOpToExpand)
330       setOperationAction(Op, MVT::f16, Expand);
331   }
332 
333   if (Subtarget.hasStdExtF()) {
334     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
336     setOperationAction(ISD::LRINT, MVT::f32, Legal);
337     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
338     setOperationAction(ISD::LROUND, MVT::f32, Legal);
339     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
340     for (auto CC : FPCCToExpand)
341       setCondCodeAction(CC, MVT::f32, Expand);
342     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
343     setOperationAction(ISD::SELECT, MVT::f32, Custom);
344     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
345     for (auto Op : FPOpToExpand)
346       setOperationAction(Op, MVT::f32, Expand);
347     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
348     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
349   }
350 
351   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
352     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
353 
354   if (Subtarget.hasStdExtD()) {
355     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
356     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
357     setOperationAction(ISD::LRINT, MVT::f64, Legal);
358     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
359     setOperationAction(ISD::LROUND, MVT::f64, Legal);
360     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
361     for (auto CC : FPCCToExpand)
362       setCondCodeAction(CC, MVT::f64, Expand);
363     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
364     setOperationAction(ISD::SELECT, MVT::f64, Custom);
365     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
366     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
367     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
368     for (auto Op : FPOpToExpand)
369       setOperationAction(Op, MVT::f64, Expand);
370     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
371     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
372   }
373 
374   if (Subtarget.is64Bit()) {
375     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
376     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
377     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
378     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
379   }
380 
381   if (Subtarget.hasStdExtF()) {
382     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
383     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
384 
385     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
386     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
387   }
388 
389   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
390   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
391   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
392   setOperationAction(ISD::JumpTable, XLenVT, Custom);
393 
394   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
395 
396   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
397   // Unfortunately this can't be determined just from the ISA naming string.
398   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
399                      Subtarget.is64Bit() ? Legal : Custom);
400 
401   setOperationAction(ISD::TRAP, MVT::Other, Legal);
402   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
403   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
404   if (Subtarget.is64Bit())
405     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
406 
407   if (Subtarget.hasStdExtA()) {
408     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
409     setMinCmpXchgSizeInBits(32);
410   } else {
411     setMaxAtomicSizeInBitsSupported(0);
412   }
413 
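  // Comparison results in scalar registers are 0 or 1, matching what the
  // slt/sltu/slti instructions produce.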
414   setBooleanContents(ZeroOrOneBooleanContent);
415 
416   if (Subtarget.hasStdExtV()) {
417     setBooleanVectorContents(ZeroOrOneBooleanContent);
418 
419     setOperationAction(ISD::VSCALE, XLenVT, Custom);
420 
421     // RVV intrinsics may have illegal operands.
422     // We also need to custom legalize vmv.x.s.
423     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
424     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
425     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
426     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
427     if (Subtarget.is64Bit()) {
428       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
429     } else {
430       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
431       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
432     }
433 
434     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
435 
436     static unsigned IntegerVPOps[] = {
437         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
438         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
439         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
440 
441     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
442                                             ISD::VP_FMUL, ISD::VP_FDIV};
443 
444     if (!Subtarget.is64Bit()) {
445       // We must custom-lower certain vXi64 operations on RV32 due to the vector
446       // element type being illegal.
447       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
448       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
449 
450       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
451       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
452       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
453       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
454       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
455       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
456       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
457       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
458     }
459 
460     for (MVT VT : BoolVecVTs) {
461       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
462 
463       // Mask VTs are custom-expanded into a series of standard nodes
464       setOperationAction(ISD::TRUNCATE, VT, Custom);
465       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
466       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
467       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
468 
469       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
470       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
471 
472       setOperationAction(ISD::SELECT, VT, Custom);
473       setOperationAction(ISD::SELECT_CC, VT, Expand);
474       setOperationAction(ISD::VSELECT, VT, Expand);
475 
476       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
477       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
478       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
479 
480       // RVV has native int->float & float->int conversions where the
481       // element type sizes are within one power-of-two of each other. Any
482       // wider distances between type sizes have to be lowered as sequences
483       // which progressively narrow the gap in stages.
484       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
485       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
486       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
487       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
488 
489       // Expand all extending loads to types larger than this, and truncating
490       // stores from types larger than this.
491       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
492         setTruncStoreAction(OtherVT, VT, Expand);
493         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
494         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
495         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
496       }
497     }
498 
499     for (MVT VT : IntVecVTs) {
500       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
501       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
502 
503       setOperationAction(ISD::SMIN, VT, Legal);
504       setOperationAction(ISD::SMAX, VT, Legal);
505       setOperationAction(ISD::UMIN, VT, Legal);
506       setOperationAction(ISD::UMAX, VT, Legal);
507 
508       setOperationAction(ISD::ROTL, VT, Expand);
509       setOperationAction(ISD::ROTR, VT, Expand);
510 
511       // Custom-lower extensions and truncations from/to mask types.
512       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
513       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
514       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
515 
516       // RVV has native int->float & float->int conversions where the
517       // element type sizes are within one power-of-two of each other. Any
518       // wider distances between type sizes have to be lowered as sequences
519       // which progressively narrow the gap in stages.
520       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
521       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
522       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
523       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
524 
525       setOperationAction(ISD::SADDSAT, VT, Legal);
526       setOperationAction(ISD::UADDSAT, VT, Legal);
527       setOperationAction(ISD::SSUBSAT, VT, Legal);
528       setOperationAction(ISD::USUBSAT, VT, Legal);
529 
530       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
531       // nodes which truncate by one power of two at a time.
532       setOperationAction(ISD::TRUNCATE, VT, Custom);
533 
534       // Custom-lower insert/extract operations to simplify patterns.
535       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
536       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
537 
538       // Custom-lower reduction operations to set up the corresponding custom
539       // nodes' operands.
540       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
541       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
542       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
543       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
544       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
545       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
546       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
547       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
548 
549       for (unsigned VPOpc : IntegerVPOps)
550         setOperationAction(VPOpc, VT, Custom);
551 
552       setOperationAction(ISD::LOAD, VT, Custom);
553       setOperationAction(ISD::STORE, VT, Custom);
554 
555       setOperationAction(ISD::MLOAD, VT, Custom);
556       setOperationAction(ISD::MSTORE, VT, Custom);
557       setOperationAction(ISD::MGATHER, VT, Custom);
558       setOperationAction(ISD::MSCATTER, VT, Custom);
559 
560       setOperationAction(ISD::VP_LOAD, VT, Custom);
561       setOperationAction(ISD::VP_STORE, VT, Custom);
562       setOperationAction(ISD::VP_GATHER, VT, Custom);
563       setOperationAction(ISD::VP_SCATTER, VT, Custom);
564 
565       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
566       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
567       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
568 
569       setOperationAction(ISD::SELECT, VT, Custom);
570       setOperationAction(ISD::SELECT_CC, VT, Expand);
571 
572       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
573       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
574 
575       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
576         setTruncStoreAction(VT, OtherVT, Expand);
577         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
578         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
579         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
580       }
581     }
582 
583     // Expand various CCs to best match the RVV ISA, which natively supports UNE
584     // but no other unordered comparisons, and supports all ordered comparisons
585     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
586     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
587     // and we pattern-match those back to the "original", swapping operands once
588     // more. This way we catch both operations and both "vf" and "fv" forms with
589     // fewer patterns.
590     ISD::CondCode VFPCCToExpand[] = {
591         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
592         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
593         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
594     };
595 
596     // Sets common operation actions on RVV floating-point vector types.
597     const auto SetCommonVFPActions = [&](MVT VT) {
598       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
599       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
600       // sizes are within one power-of-two of each other. Therefore conversions
601       // between vXf16 and vXf64 must be lowered as sequences which convert via
602       // vXf32.
603       setOperationAction(ISD::FP_ROUND, VT, Custom);
604       setOperationAction(ISD::FP_EXTEND, VT, Custom);
605       // Custom-lower insert/extract operations to simplify patterns.
606       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
607       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
608       // Expand various condition codes (explained above).
609       for (auto CC : VFPCCToExpand)
610         setCondCodeAction(CC, VT, Expand);
611 
612       setOperationAction(ISD::FMINNUM, VT, Legal);
613       setOperationAction(ISD::FMAXNUM, VT, Legal);
614 
615       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
616       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
617       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
618       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
619       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
620 
621       setOperationAction(ISD::LOAD, VT, Custom);
622       setOperationAction(ISD::STORE, VT, Custom);
623 
624       setOperationAction(ISD::MLOAD, VT, Custom);
625       setOperationAction(ISD::MSTORE, VT, Custom);
626       setOperationAction(ISD::MGATHER, VT, Custom);
627       setOperationAction(ISD::MSCATTER, VT, Custom);
628 
629       setOperationAction(ISD::VP_LOAD, VT, Custom);
630       setOperationAction(ISD::VP_STORE, VT, Custom);
631       setOperationAction(ISD::VP_GATHER, VT, Custom);
632       setOperationAction(ISD::VP_SCATTER, VT, Custom);
633 
634       setOperationAction(ISD::SELECT, VT, Custom);
635       setOperationAction(ISD::SELECT_CC, VT, Expand);
636 
637       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
638       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
639       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
640 
641       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
642 
643       for (unsigned VPOpc : FloatingPointVPOps)
644         setOperationAction(VPOpc, VT, Custom);
645     };
646 
647     // Sets common extload/truncstore actions on RVV floating-point vector
648     // types.
649     const auto SetCommonVFPExtLoadTruncStoreActions =
650         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
651           for (auto SmallVT : SmallerVTs) {
652             setTruncStoreAction(VT, SmallVT, Expand);
653             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
654           }
655         };
656 
657     if (Subtarget.hasStdExtZfh())
658       for (MVT VT : F16VecVTs)
659         SetCommonVFPActions(VT);
660 
661     for (MVT VT : F32VecVTs) {
662       if (Subtarget.hasStdExtF())
663         SetCommonVFPActions(VT);
664       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
665     }
666 
667     for (MVT VT : F64VecVTs) {
668       if (Subtarget.hasStdExtD())
669         SetCommonVFPActions(VT);
670       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
671       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
672     }
673 
674     if (Subtarget.useRVVForFixedLengthVectors()) {
675       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
676         if (!useRVVForFixedLengthVectorVT(VT))
677           continue;
678 
679         // By default everything must be expanded.
680         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
681           setOperationAction(Op, VT, Expand);
682         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
683           setTruncStoreAction(VT, OtherVT, Expand);
684           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
685           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
686           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
687         }
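        // Operations marked Custom below are lowered by converting the
        // fixed-length vector into its scalable container type, performing
        // the operation there (typically via *_VL nodes), and converting the
        // result back.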
688 
689         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
690         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
691         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
692 
693         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
694         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
695 
696         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
697         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
698 
699         setOperationAction(ISD::LOAD, VT, Custom);
700         setOperationAction(ISD::STORE, VT, Custom);
701 
702         setOperationAction(ISD::SETCC, VT, Custom);
703 
704         setOperationAction(ISD::SELECT, VT, Custom);
705 
706         setOperationAction(ISD::TRUNCATE, VT, Custom);
707 
708         setOperationAction(ISD::BITCAST, VT, Custom);
709 
710         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
711         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
712         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
713 
714         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
715         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
716         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
717         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
718 
719         // Operations below are different between mask vectors and other vectors.
720         if (VT.getVectorElementType() == MVT::i1) {
721           setOperationAction(ISD::AND, VT, Custom);
722           setOperationAction(ISD::OR, VT, Custom);
723           setOperationAction(ISD::XOR, VT, Custom);
724           continue;
725         }
726 
727         // Use SPLAT_VECTOR to prevent type legalization from destroying the
728         // splats when type legalizing i64 scalar on RV32.
729         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
730         // improvements first.
731         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
732           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
733           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
734         }
735 
736         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
737         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
738 
739         setOperationAction(ISD::MLOAD, VT, Custom);
740         setOperationAction(ISD::MSTORE, VT, Custom);
741         setOperationAction(ISD::MGATHER, VT, Custom);
742         setOperationAction(ISD::MSCATTER, VT, Custom);
743 
744         setOperationAction(ISD::VP_LOAD, VT, Custom);
745         setOperationAction(ISD::VP_STORE, VT, Custom);
746         setOperationAction(ISD::VP_GATHER, VT, Custom);
747         setOperationAction(ISD::VP_SCATTER, VT, Custom);
748 
749         setOperationAction(ISD::ADD, VT, Custom);
750         setOperationAction(ISD::MUL, VT, Custom);
751         setOperationAction(ISD::SUB, VT, Custom);
752         setOperationAction(ISD::AND, VT, Custom);
753         setOperationAction(ISD::OR, VT, Custom);
754         setOperationAction(ISD::XOR, VT, Custom);
755         setOperationAction(ISD::SDIV, VT, Custom);
756         setOperationAction(ISD::SREM, VT, Custom);
757         setOperationAction(ISD::UDIV, VT, Custom);
758         setOperationAction(ISD::UREM, VT, Custom);
759         setOperationAction(ISD::SHL, VT, Custom);
760         setOperationAction(ISD::SRA, VT, Custom);
761         setOperationAction(ISD::SRL, VT, Custom);
762 
763         setOperationAction(ISD::SMIN, VT, Custom);
764         setOperationAction(ISD::SMAX, VT, Custom);
765         setOperationAction(ISD::UMIN, VT, Custom);
766         setOperationAction(ISD::UMAX, VT, Custom);
767         setOperationAction(ISD::ABS,  VT, Custom);
768 
769         setOperationAction(ISD::MULHS, VT, Custom);
770         setOperationAction(ISD::MULHU, VT, Custom);
771 
772         setOperationAction(ISD::SADDSAT, VT, Custom);
773         setOperationAction(ISD::UADDSAT, VT, Custom);
774         setOperationAction(ISD::SSUBSAT, VT, Custom);
775         setOperationAction(ISD::USUBSAT, VT, Custom);
776 
777         setOperationAction(ISD::VSELECT, VT, Custom);
778         setOperationAction(ISD::SELECT_CC, VT, Expand);
779 
780         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
781         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
782         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
783 
784         // Custom-lower reduction operations to set up the corresponding custom
785         // nodes' operands.
786         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
787         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
788         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
789         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
790         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
791 
792         for (unsigned VPOpc : IntegerVPOps)
793           setOperationAction(VPOpc, VT, Custom);
794       }
795 
796       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
797         if (!useRVVForFixedLengthVectorVT(VT))
798           continue;
799 
800         // By default everything must be expanded.
801         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
802           setOperationAction(Op, VT, Expand);
803         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
804           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
805           setTruncStoreAction(VT, OtherVT, Expand);
806         }
807 
808         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
809         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
810         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
811 
812         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
813         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
814         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
815         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
816         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
817 
818         setOperationAction(ISD::LOAD, VT, Custom);
819         setOperationAction(ISD::STORE, VT, Custom);
820         setOperationAction(ISD::MLOAD, VT, Custom);
821         setOperationAction(ISD::MSTORE, VT, Custom);
822         setOperationAction(ISD::MGATHER, VT, Custom);
823         setOperationAction(ISD::MSCATTER, VT, Custom);
824 
825         setOperationAction(ISD::VP_LOAD, VT, Custom);
826         setOperationAction(ISD::VP_STORE, VT, Custom);
827         setOperationAction(ISD::VP_GATHER, VT, Custom);
828         setOperationAction(ISD::VP_SCATTER, VT, Custom);
829 
830         setOperationAction(ISD::FADD, VT, Custom);
831         setOperationAction(ISD::FSUB, VT, Custom);
832         setOperationAction(ISD::FMUL, VT, Custom);
833         setOperationAction(ISD::FDIV, VT, Custom);
834         setOperationAction(ISD::FNEG, VT, Custom);
835         setOperationAction(ISD::FABS, VT, Custom);
836         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
837         setOperationAction(ISD::FSQRT, VT, Custom);
838         setOperationAction(ISD::FMA, VT, Custom);
839         setOperationAction(ISD::FMINNUM, VT, Custom);
840         setOperationAction(ISD::FMAXNUM, VT, Custom);
841 
842         setOperationAction(ISD::FP_ROUND, VT, Custom);
843         setOperationAction(ISD::FP_EXTEND, VT, Custom);
844 
845         for (auto CC : VFPCCToExpand)
846           setCondCodeAction(CC, VT, Expand);
847 
848         setOperationAction(ISD::VSELECT, VT, Custom);
849         setOperationAction(ISD::SELECT, VT, Custom);
850         setOperationAction(ISD::SELECT_CC, VT, Expand);
851 
852         setOperationAction(ISD::BITCAST, VT, Custom);
853 
854         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
855         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
856         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
857         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
858 
859         for (unsigned VPOpc : FloatingPointVPOps)
860           setOperationAction(VPOpc, VT, Custom);
861       }
862 
863       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
864       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
865       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
866       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
867       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
868       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
869       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
870       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
871     }
872   }
873 
874   // Function alignments.
875   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
876   setMinFunctionAlignment(FunctionAlignment);
877   setPrefFunctionAlignment(FunctionAlignment);
878 
879   setMinimumJumpTableEntries(5);
880 
881   // Jumps are expensive, compared to logic
882   setJumpIsExpensive();
883 
884   // We can use any register for comparisons
885   setHasMultipleConditionRegisters();
886 
887   setTargetDAGCombine(ISD::ADD);
888   setTargetDAGCombine(ISD::SUB);
889   setTargetDAGCombine(ISD::AND);
890   setTargetDAGCombine(ISD::OR);
891   setTargetDAGCombine(ISD::XOR);
892   setTargetDAGCombine(ISD::ANY_EXTEND);
893   setTargetDAGCombine(ISD::ZERO_EXTEND);
894   if (Subtarget.hasStdExtV()) {
895     setTargetDAGCombine(ISD::FCOPYSIGN);
896     setTargetDAGCombine(ISD::MGATHER);
897     setTargetDAGCombine(ISD::MSCATTER);
898     setTargetDAGCombine(ISD::VP_GATHER);
899     setTargetDAGCombine(ISD::VP_SCATTER);
900     setTargetDAGCombine(ISD::SRA);
901     setTargetDAGCombine(ISD::SRL);
902     setTargetDAGCombine(ISD::SHL);
903   }
904 }
905 
906 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
907                                             LLVMContext &Context,
908                                             EVT VT) const {
909   if (!VT.isVector())
910     return getPointerTy(DL);
911   if (Subtarget.hasStdExtV() &&
912       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
913     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
914   return VT.changeVectorElementTypeToInteger();
915 }
916 
917 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
918   return Subtarget.getXLenVT();
919 }
920 
921 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
922                                              const CallInst &I,
923                                              MachineFunction &MF,
924                                              unsigned Intrinsic) const {
925   switch (Intrinsic) {
926   default:
927     return false;
928   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
929   case Intrinsic::riscv_masked_atomicrmw_add_i32:
930   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
931   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
932   case Intrinsic::riscv_masked_atomicrmw_max_i32:
933   case Intrinsic::riscv_masked_atomicrmw_min_i32:
934   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
935   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
936   case Intrinsic::riscv_masked_cmpxchg_i32: {
937     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
938     Info.opc = ISD::INTRINSIC_W_CHAIN;
939     Info.memVT = MVT::getVT(PtrTy->getElementType());
940     Info.ptrVal = I.getArgOperand(0);
941     Info.offset = 0;
942     Info.align = Align(4);
943     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
944                  MachineMemOperand::MOVolatile;
945     return true;
946   }
947   }
948 }
949 
950 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
951                                                 const AddrMode &AM, Type *Ty,
952                                                 unsigned AS,
953                                                 Instruction *I) const {
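  // RISC-V loads and stores address memory as a base register plus a signed
  // 12-bit immediate, e.g. "lw a0, -4(a1)", so that is the only addressing
  // mode accepted here.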
954   // No global is ever allowed as a base.
955   if (AM.BaseGV)
956     return false;
957 
958   // Require a 12-bit signed offset.
959   if (!isInt<12>(AM.BaseOffs))
960     return false;
961 
962   switch (AM.Scale) {
963   case 0: // "r+i" or just "i", depending on HasBaseReg.
964     break;
965   case 1:
966     if (!AM.HasBaseReg) // allow "r+i".
967       break;
968     return false; // disallow "r+r" or "r+r+i".
969   default:
970     return false;
971   }
972 
973   return true;
974 }
975 
976 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
977   return isInt<12>(Imm);
978 }
979 
980 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
981   return isInt<12>(Imm);
982 }
983 
984 // On RV32, 64-bit integers are split into their high and low parts and held
985 // in two different registers, so the trunc is free since the low register can
986 // just be used.
987 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
988   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
989     return false;
990   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
991   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
992   return (SrcBits == 64 && DestBits == 32);
993 }
994 
995 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
996   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
997       !SrcVT.isInteger() || !DstVT.isInteger())
998     return false;
999   unsigned SrcBits = SrcVT.getSizeInBits();
1000   unsigned DestBits = DstVT.getSizeInBits();
1001   return (SrcBits == 64 && DestBits == 32);
1002 }
1003 
1004 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1005   // Zexts are free if they can be combined with a load.
1006   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1007     EVT MemVT = LD->getMemoryVT();
1008     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
1009          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
1010         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1011          LD->getExtensionType() == ISD::ZEXTLOAD))
1012       return true;
1013   }
1014 
1015   return TargetLowering::isZExtFree(Val, VT2);
1016 }
1017 
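// On RV64, the 32-bit *W instructions sign-extend their results, so an
// i32->i64 sign extension is usually a single sext.w (or folded away), while
// a zero extension needs a shift pair (or add.uw/zext.w with Zba).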
1018 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1019   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1020 }
1021 
1022 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1023   return Subtarget.hasStdExtZbb();
1024 }
1025 
1026 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1027   return Subtarget.hasStdExtZbb();
1028 }
1029 
1030 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1031                                        bool ForCodeSize) const {
1032   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1033     return false;
1034   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1035     return false;
1036   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1037     return false;
1038   if (Imm.isNegZero())
1039     return false;
1040   return Imm.isZero();
1041 }
1042 
1043 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1044   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1045          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1046          (VT == MVT::f64 && Subtarget.hasStdExtD());
1047 }
1048 
1049 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1050                                                       CallingConv::ID CC,
1051                                                       EVT VT) const {
1052   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1053   // end up using a GPR but that will be decided based on ABI.
1054   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1055     return MVT::f32;
1056 
1057   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1058 }
1059 
1060 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1061                                                            CallingConv::ID CC,
1062                                                            EVT VT) const {
1063   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1064   // end up using a GPR but that will be decided based on ABI.
1065   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1066     return 1;
1067 
1068   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1069 }
1070 
1071 // Changes the condition code and swaps operands if necessary, so the SetCC
1072 // operation matches one of the comparisons supported directly by branches
1073 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1074 // with 1/-1.
1075 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1076                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1077   // Convert X > -1 to X >= 0.
1078   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1079     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1080     CC = ISD::SETGE;
1081     return;
1082   }
1083   // Convert X < 1 to 0 >= X.
1084   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1085     RHS = LHS;
1086     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1087     CC = ISD::SETGE;
1088     return;
1089   }
1090 
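  // Branches natively support EQ/NE/(U)LT/(U)GE, so rewrite the remaining
  // codes in terms of those by swapping operands, e.g. (setugt a, b) becomes
  // (setult b, a).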
1091   switch (CC) {
1092   default:
1093     break;
1094   case ISD::SETGT:
1095   case ISD::SETLE:
1096   case ISD::SETUGT:
1097   case ISD::SETULE:
1098     CC = ISD::getSetCCSwappedOperands(CC);
1099     std::swap(LHS, RHS);
1100     break;
1101   }
1102 }
1103 
1104 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1105   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1106   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
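  // Treat mask vectors as if they had i8 elements so that, for example,
  // nxv8i1 maps to the same LMUL as nxv8i8.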
1107   if (VT.getVectorElementType() == MVT::i1)
1108     KnownSize *= 8;
1109 
1110   switch (KnownSize) {
1111   default:
1112     llvm_unreachable("Invalid LMUL.");
1113   case 8:
1114     return RISCVII::VLMUL::LMUL_F8;
1115   case 16:
1116     return RISCVII::VLMUL::LMUL_F4;
1117   case 32:
1118     return RISCVII::VLMUL::LMUL_F2;
1119   case 64:
1120     return RISCVII::VLMUL::LMUL_1;
1121   case 128:
1122     return RISCVII::VLMUL::LMUL_2;
1123   case 256:
1124     return RISCVII::VLMUL::LMUL_4;
1125   case 512:
1126     return RISCVII::VLMUL::LMUL_8;
1127   }
1128 }
1129 
1130 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1131   switch (LMul) {
1132   default:
1133     llvm_unreachable("Invalid LMUL.");
1134   case RISCVII::VLMUL::LMUL_F8:
1135   case RISCVII::VLMUL::LMUL_F4:
1136   case RISCVII::VLMUL::LMUL_F2:
1137   case RISCVII::VLMUL::LMUL_1:
1138     return RISCV::VRRegClassID;
1139   case RISCVII::VLMUL::LMUL_2:
1140     return RISCV::VRM2RegClassID;
1141   case RISCVII::VLMUL::LMUL_4:
1142     return RISCV::VRM4RegClassID;
1143   case RISCVII::VLMUL::LMUL_8:
1144     return RISCV::VRM8RegClassID;
1145   }
1146 }
1147 
1148 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1149   RISCVII::VLMUL LMUL = getLMUL(VT);
1150   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1151       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1152       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1153       LMUL == RISCVII::VLMUL::LMUL_1) {
1154     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1155                   "Unexpected subreg numbering");
1156     return RISCV::sub_vrm1_0 + Index;
1157   }
1158   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1159     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1160                   "Unexpected subreg numbering");
1161     return RISCV::sub_vrm2_0 + Index;
1162   }
1163   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1164     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1165                   "Unexpected subreg numbering");
1166     return RISCV::sub_vrm4_0 + Index;
1167   }
1168   llvm_unreachable("Invalid vector type.");
1169 }
1170 
1171 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1172   if (VT.getVectorElementType() == MVT::i1)
1173     return RISCV::VRRegClassID;
1174   return getRegClassIDForLMUL(getLMUL(VT));
1175 }
1176 
1177 // Attempt to decompose a subvector insert/extract between VecVT and
1178 // SubVecVT via subregister indices. Returns the subregister index that
1179 // can perform the subvector insert/extract with the given element index, as
1180 // well as the index corresponding to any leftover subvectors that must be
1181 // further inserted/extracted within the register class for SubVecVT.
1182 std::pair<unsigned, unsigned>
1183 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1184     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1185     const RISCVRegisterInfo *TRI) {
1186   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1187                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1188                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1189                 "Register classes not ordered");
1190   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1191   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1192   // Try to compose a subregister index that takes us from the incoming
1193   // LMUL>1 register class down to the outgoing one. At each step we halve
1194   // the LMUL:
1195   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1196   // Note that this is not guaranteed to find a subregister index, such as
1197   // when we are extracting from one VR type to another.
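  // In the example above, the composed index is built from sub_vrm4_1, then
  // sub_vrm2_1, then sub_vrm1_0, and the leftover element index returned with
  // it is 0.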
1198   unsigned SubRegIdx = RISCV::NoSubRegister;
1199   for (const unsigned RCID :
1200        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1201     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1202       VecVT = VecVT.getHalfNumVectorElementsVT();
1203       bool IsHi =
1204           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1205       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1206                                             getSubregIndexByMVT(VecVT, IsHi));
1207       if (IsHi)
1208         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1209     }
1210   return {SubRegIdx, InsertExtractIdx};
1211 }
1212 
1213 // Permit combining of mask vector stores, as a BUILD_VECTOR of those types
1214 // never expands to scalar stores.
1215 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1216   return !Subtarget.useRVVForFixedLengthVectors() ||
1217          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1218 }
1219 
1220 static bool useRVVForFixedLengthVectorVT(MVT VT,
1221                                          const RISCVSubtarget &Subtarget) {
1222   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1223   if (!Subtarget.useRVVForFixedLengthVectors())
1224     return false;
1225 
1226   // We only support a set of vector types with a consistent maximum fixed size
1227   // across all supported vector element types to avoid legalization issues.
1228   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1229   // fixed-length vector type we support is 1024 bytes.
1230   if (VT.getFixedSizeInBits() > 1024 * 8)
1231     return false;
1232 
1233   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1234 
1235   MVT EltVT = VT.getVectorElementType();
1236 
1237   // Don't use RVV for vectors we cannot scalarize if required.
1238   switch (EltVT.SimpleTy) {
1239   // i1 is supported but has different rules.
1240   default:
1241     return false;
1242   case MVT::i1:
1243     // Masks can only use a single register.
1244     if (VT.getVectorNumElements() > MinVLen)
1245       return false;
1246     MinVLen /= 8;
1247     break;
1248   case MVT::i8:
1249   case MVT::i16:
1250   case MVT::i32:
1251   case MVT::i64:
1252     break;
1253   case MVT::f16:
1254     if (!Subtarget.hasStdExtZfh())
1255       return false;
1256     break;
1257   case MVT::f32:
1258     if (!Subtarget.hasStdExtF())
1259       return false;
1260     break;
1261   case MVT::f64:
1262     if (!Subtarget.hasStdExtD())
1263       return false;
1264     break;
1265   }
1266 
1267   // Reject elements larger than ELEN.
1268   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1269     return false;
1270 
1271   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1272   // Don't use RVV for types that don't fit.
1273   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1274     return false;
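  // For example, with a 128-bit minimum VLEN, v32i32 (1024 bits) requires
  // LMUL 8 and is only accepted if the maximum LMUL for fixed-length vectors
  // allows it.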
1275 
1276   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1277   // the base fixed length RVV support in place.
1278   if (!VT.isPow2VectorType())
1279     return false;
1280 
1281   return true;
1282 }
1283 
1284 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1285   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1286 }
1287 
1288 // Return the scalable container type to use for a fixed-length vector VT.
1289 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1290                                             const RISCVSubtarget &Subtarget) {
1291   // This may be called before legal types are set up.
1292   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1293           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1294          "Expected legal fixed length vector!");
1295 
1296   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1297   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1298 
1299   MVT EltVT = VT.getVectorElementType();
1300   switch (EltVT.SimpleTy) {
1301   default:
1302     llvm_unreachable("unexpected element type for RVV container");
1303   case MVT::i1:
1304   case MVT::i8:
1305   case MVT::i16:
1306   case MVT::i32:
1307   case MVT::i64:
1308   case MVT::f16:
1309   case MVT::f32:
1310   case MVT::f64: {
1311     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1312     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1313     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
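    // For example, with a 128-bit minimum VLEN, v8i32 (256 bits) maps to
    // nxv4i32: (8 * 64) / 128 = 4 elements.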
1314     unsigned NumElts =
1315         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1316     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1317     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1318     return MVT::getScalableVectorVT(EltVT, NumElts);
1319   }
1320   }
1321 }
1322 
1323 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1324                                             const RISCVSubtarget &Subtarget) {
1325   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1326                                           Subtarget);
1327 }
1328 
1329 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1330   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1331 }
1332 
1333 // Grow V to consume an entire RVV register.
1334 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1335                                        const RISCVSubtarget &Subtarget) {
1336   assert(VT.isScalableVector() &&
1337          "Expected to convert into a scalable vector!");
1338   assert(V.getValueType().isFixedLengthVector() &&
1339          "Expected a fixed length vector operand!");
1340   SDLoc DL(V);
1341   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1342   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1343 }
1344 
1345 // Shrink V so it's just big enough to maintain a VT's worth of data.
1346 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1347                                          const RISCVSubtarget &Subtarget) {
1348   assert(VT.isFixedLengthVector() &&
1349          "Expected to convert into a fixed length vector!");
1350   assert(V.getValueType().isScalableVector() &&
1351          "Expected a scalable vector operand!");
1352   SDLoc DL(V);
1353   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1354   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1355 }
1356 
1357 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1358 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1359 // the vector type that it is contained in.
1360 static std::pair<SDValue, SDValue>
1361 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1362                 const RISCVSubtarget &Subtarget) {
1363   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1364   MVT XLenVT = Subtarget.getXLenVT();
1365   SDValue VL = VecVT.isFixedLengthVector()
1366                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1367                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1368   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1369   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1370   return {Mask, VL};
1371 }
1372 
1373 // As above but assuming the given type is a scalable vector type.
1374 static std::pair<SDValue, SDValue>
1375 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1376                         const RISCVSubtarget &Subtarget) {
1377   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1378   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1379 }
1380 
// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very
// little of either is (currently) supported. This can get us into an infinite
// loop where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a
// BUILD_VECTOR as a ..., etc.
1385 // Until either (or both) of these can reliably lower any node, reporting that
1386 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1387 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1388 // which is not desirable.
1389 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1390     EVT VT, unsigned DefinedValues) const {
1391   return false;
1392 }
1393 
1394 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1395   // Only splats are currently supported.
1396   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1397     return true;
1398 
1399   return false;
1400 }
1401 
1402 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
1403   // RISCV FP-to-int conversions saturate to the destination register size, but
1404   // don't produce 0 for nan. We can use a conversion instruction and fix the
1405   // nan case with a compare and a select.
1406   SDValue Src = Op.getOperand(0);
1407 
1408   EVT DstVT = Op.getValueType();
1409   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1410 
1411   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1412   unsigned Opc;
1413   if (SatVT == DstVT)
1414     Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
1415   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1416     Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
1417   else
1418     return SDValue();
1419   // FIXME: Support other SatVTs by clamping before or after the conversion.
1420 
1421   SDLoc DL(Op);
1422   SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);
1423 
1424   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
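  // Src is unordered with itself exactly when it is NaN, so the SETUO select
  // below returns zero for NaN inputs and the converted value otherwise.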
1425   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1426 }
1427 
1428 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1429                                  const RISCVSubtarget &Subtarget) {
1430   MVT VT = Op.getSimpleValueType();
1431   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1432 
1433   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1434 
1435   SDLoc DL(Op);
1436   SDValue Mask, VL;
1437   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1438 
1439   unsigned Opc =
1440       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1441   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1442   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1443 }
1444 
1445 struct VIDSequence {
1446   int64_t StepNumerator;
1447   unsigned StepDenominator;
1448   int64_t Addend;
1449 };
1450 
1451 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1452 // to the (non-zero) step S and start value X. This can be then lowered as the
1453 // RVV sequence (VID * S) + X, for example.
1454 // The step S is represented as an integer numerator divided by a positive
1455 // denominator. Note that the implementation currently only identifies
1456 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1457 // cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; it is left to the caller to
1460 // determine whether this is worth generating code for.
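// For example, <1, 1, 2, 2> is matched as StepNumerator=1, StepDenominator=2,
// Addend=1, i.e. (VID * 1) / 2 + 1.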
1461 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1462   unsigned NumElts = Op.getNumOperands();
1463   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1464   if (!Op.getValueType().isInteger())
1465     return None;
1466 
1467   Optional<unsigned> SeqStepDenom;
1468   Optional<int64_t> SeqStepNum, SeqAddend;
1469   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1470   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1471   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1472     // Assume undef elements match the sequence; we just have to be careful
1473     // when interpolating across them.
1474     if (Op.getOperand(Idx).isUndef())
1475       continue;
1476     // The BUILD_VECTOR must be all constants.
1477     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1478       return None;
1479 
1480     uint64_t Val = Op.getConstantOperandVal(Idx) &
1481                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1482 
1483     if (PrevElt) {
1484       // Calculate the step since the last non-undef element, and ensure
1485       // it's consistent across the entire sequence.
1486       unsigned IdxDiff = Idx - PrevElt->second;
1487       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1488 
      // A zero value difference means that we're somewhere in the middle
1490       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1491       // step change before evaluating the sequence.
1492       if (ValDiff != 0) {
1493         int64_t Remainder = ValDiff % IdxDiff;
1494         // Normalize the step if it's greater than 1.
1495         if (Remainder != ValDiff) {
1496           // The difference must cleanly divide the element span.
1497           if (Remainder != 0)
1498             return None;
1499           ValDiff /= IdxDiff;
1500           IdxDiff = 1;
1501         }
1502 
1503         if (!SeqStepNum)
1504           SeqStepNum = ValDiff;
1505         else if (ValDiff != SeqStepNum)
1506           return None;
1507 
1508         if (!SeqStepDenom)
1509           SeqStepDenom = IdxDiff;
1510         else if (IdxDiff != *SeqStepDenom)
1511           return None;
1512       }
1513     }
1514 
1515     // Record and/or check any addend.
1516     if (SeqStepNum && SeqStepDenom) {
1517       uint64_t ExpectedVal =
1518           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1519       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1520       if (!SeqAddend)
1521         SeqAddend = Addend;
1522       else if (SeqAddend != Addend)
1523         return None;
1524     }
1525 
1526     // Record this non-undef element for later.
1527     if (!PrevElt || PrevElt->first != Val)
1528       PrevElt = std::make_pair(Val, Idx);
1529   }
1530   // We need to have logged both a step and an addend for this to count as
1531   // a legal index sequence.
1532   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1533     return None;
1534 
1535   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1536 }
1537 
1538 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1539                                  const RISCVSubtarget &Subtarget) {
1540   MVT VT = Op.getSimpleValueType();
1541   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1542 
1543   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1544 
1545   SDLoc DL(Op);
1546   SDValue Mask, VL;
1547   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1548 
1549   MVT XLenVT = Subtarget.getXLenVT();
1550   unsigned NumElts = Op.getNumOperands();
1551 
1552   if (VT.getVectorElementType() == MVT::i1) {
1553     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1554       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1555       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1556     }
1557 
1558     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1559       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1560       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1561     }
1562 
1563     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1564     // scalar integer chunks whose bit-width depends on the number of mask
1565     // bits and XLEN.
1566     // First, determine the most appropriate scalar integer type to use. This
1567     // is at most XLenVT, but may be shrunk to a smaller vector element type
1568     // according to the size of the final vector - use i8 chunks rather than
1569     // XLenVT if we're producing a v8i1. This results in more consistent
1570     // codegen across RV32 and RV64.
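    // For example, a v16i1 constant mask is built from a single i16 chunk
    // (v1i16), while a v64i1 mask on RV32 uses two i32 chunks (v2i32).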
1571     unsigned NumViaIntegerBits =
1572         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1573     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1574       // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. A load from a constant pool can be used instead.
1577       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1578         return SDValue();
1579       // Now we can create our integer vector type. Note that it may be larger
1580       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1581       MVT IntegerViaVecVT =
1582           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1583                            divideCeil(NumElts, NumViaIntegerBits));
1584 
1585       uint64_t Bits = 0;
1586       unsigned BitPos = 0, IntegerEltIdx = 0;
1587       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1588 
1589       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1590         // Once we accumulate enough bits to fill our scalar type, insert into
1591         // our vector and clear our accumulated data.
1592         if (I != 0 && I % NumViaIntegerBits == 0) {
1593           if (NumViaIntegerBits <= 32)
1594             Bits = SignExtend64(Bits, 32);
1595           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1596           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1597                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1598           Bits = 0;
1599           BitPos = 0;
1600           IntegerEltIdx++;
1601         }
1602         SDValue V = Op.getOperand(I);
1603         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1604         Bits |= ((uint64_t)BitValue << BitPos);
1605       }
1606 
1607       // Insert the (remaining) scalar value into position in our integer
1608       // vector type.
1609       if (NumViaIntegerBits <= 32)
1610         Bits = SignExtend64(Bits, 32);
1611       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1612       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1613                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1614 
1615       if (NumElts < NumViaIntegerBits) {
1616         // If we're producing a smaller vector than our minimum legal integer
1617         // type, bitcast to the equivalent (known-legal) mask type, and extract
1618         // our final mask.
1619         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1620         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1621         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1622                           DAG.getConstant(0, DL, XLenVT));
1623       } else {
1624         // Else we must have produced an integer type with the same size as the
1625         // mask type; bitcast for the final result.
1626         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1627         Vec = DAG.getBitcast(VT, Vec);
1628       }
1629 
1630       return Vec;
1631     }
1632 
1633     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1634     // vector type, we have a legal equivalently-sized i8 type, so we can use
1635     // that.
1636     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1637     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1638 
1639     SDValue WideVec;
1640     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1641       // For a splat, perform a scalar truncate before creating the wider
1642       // vector.
1643       assert(Splat.getValueType() == XLenVT &&
1644              "Unexpected type for i1 splat value");
1645       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1646                           DAG.getConstant(1, DL, XLenVT));
1647       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1648     } else {
1649       SmallVector<SDValue, 8> Ops(Op->op_values());
1650       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1651       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1652       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1653     }
1654 
1655     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1656   }
1657 
1658   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1659     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1660                                         : RISCVISD::VMV_V_X_VL;
1661     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1662     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1663   }
1664 
1665   // Try and match index sequences, which we can lower to the vid instruction
1666   // with optional modifications. An all-undef vector is matched by
1667   // getSplatValue, above.
1668   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1669     int64_t StepNumerator = SimpleVID->StepNumerator;
1670     unsigned StepDenominator = SimpleVID->StepDenominator;
1671     int64_t Addend = SimpleVID->Addend;
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
1673     // threshold since it's the immediate value many RVV instructions accept.
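    // For example, <2, 4, 6, 8> has StepNumerator=2, StepDenominator=1 and
    // Addend=2, and is lowered below as (vid << 1) + 2.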
1674     if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
1675         isInt<5>(Addend)) {
1676       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1677       // Convert right out of the scalable type so we can use standard ISD
1678       // nodes for the rest of the computation. If we used scalable types with
1679       // these, we'd lose the fixed-length vector info and generate worse
1680       // vsetvli code.
1681       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1682       assert(StepNumerator != 0 && "Invalid step");
1683       bool Negate = false;
1684       if (StepNumerator != 1) {
1685         int64_t SplatStepVal = StepNumerator;
1686         unsigned Opcode = ISD::MUL;
1687         if (isPowerOf2_64(std::abs(StepNumerator))) {
1688           Negate = StepNumerator < 0;
1689           Opcode = ISD::SHL;
1690           SplatStepVal = Log2_64(std::abs(StepNumerator));
1691         }
1692         SDValue SplatStep = DAG.getSplatVector(
1693             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1694         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1695       }
1696       if (StepDenominator != 1) {
1697         SDValue SplatStep = DAG.getSplatVector(
1698             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
1699         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
1700       }
1701       if (Addend != 0 || Negate) {
1702         SDValue SplatAddend =
1703             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
1704         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
1705       }
1706       return VID;
1707     }
1708   }
1709 
1710   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1711   // when re-interpreted as a vector with a larger element type. For example,
1712   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1713   // could be instead splat as
1714   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1715   // TODO: This optimization could also work on non-constant splats, but it
1716   // would require bit-manipulation instructions to construct the splat value.
1717   SmallVector<SDValue> Sequence;
1718   unsigned EltBitSize = VT.getScalarSizeInBits();
1719   const auto *BV = cast<BuildVectorSDNode>(Op);
1720   if (VT.isInteger() && EltBitSize < 64 &&
1721       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1722       BV->getRepeatedSequence(Sequence) &&
1723       (Sequence.size() * EltBitSize) <= 64) {
1724     unsigned SeqLen = Sequence.size();
1725     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1726     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1727     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1728             ViaIntVT == MVT::i64) &&
1729            "Unexpected sequence type");
1730 
1731     unsigned EltIdx = 0;
1732     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1733     uint64_t SplatValue = 0;
1734     // Construct the amalgamated value which can be splatted as this larger
1735     // vector type.
1736     for (const auto &SeqV : Sequence) {
1737       if (!SeqV.isUndef())
1738         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1739                        << (EltIdx * EltBitSize));
1740       EltIdx++;
1741     }
1742 
1743     // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1745     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1746       SplatValue = SignExtend64(SplatValue, 32);
1747 
1748     // Since we can't introduce illegal i64 types at this stage, we can only
    // perform an i64 splat on RV32 if the value is the sign-extension of its
    // low 32 bits. That way we can use RVV instructions to splat.
1751     assert((ViaIntVT.bitsLE(XLenVT) ||
1752             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1753            "Unexpected bitcast sequence");
1754     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1755       SDValue ViaVL =
1756           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1757       MVT ViaContainerVT =
1758           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1759       SDValue Splat =
1760           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1761                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1762       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1763       return DAG.getBitcast(VT, Splat);
1764     }
1765   }
1766 
1767   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1768   // which constitute a large proportion of the elements. In such cases we can
1769   // splat a vector with the dominant element and make up the shortfall with
1770   // INSERT_VECTOR_ELTs.
1771   // Note that this includes vectors of 2 elements by association. The
1772   // upper-most element is the "dominant" one, allowing us to use a splat to
1773   // "insert" the upper element, and an insert of the lower element at position
1774   // 0, which improves codegen.
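  // For example, <x, y, x, x> can be lowered as a splat of x followed by an
  // INSERT_VECTOR_ELT of y at index 1.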
1775   SDValue DominantValue;
1776   unsigned MostCommonCount = 0;
1777   DenseMap<SDValue, unsigned> ValueCounts;
1778   unsigned NumUndefElts =
1779       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1780 
1781   // Track the number of scalar loads we know we'd be inserting, estimated as
1782   // any non-zero floating-point constant. Other kinds of element are either
1783   // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materializations and
1785   // vector-insertion instructions is not known.
1786   unsigned NumScalarLoads = 0;
1787 
1788   for (SDValue V : Op->op_values()) {
1789     if (V.isUndef())
1790       continue;
1791 
1792     ValueCounts.insert(std::make_pair(V, 0));
1793     unsigned &Count = ValueCounts[V];
1794 
1795     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
1796       NumScalarLoads += !CFP->isExactlyValue(+0.0);
1797 
1798     // Is this value dominant? In case of a tie, prefer the highest element as
1799     // it's cheaper to insert near the beginning of a vector than it is at the
1800     // end.
1801     if (++Count >= MostCommonCount) {
1802       DominantValue = V;
1803       MostCommonCount = Count;
1804     }
1805   }
1806 
1807   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1808   unsigned NumDefElts = NumElts - NumUndefElts;
1809   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1810 
1811   // Don't perform this optimization when optimizing for size, since
1812   // materializing elements and inserting them tends to cause code bloat.
1813   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
1814       ((MostCommonCount > DominantValueCountThreshold) ||
1815        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1816     // Start by splatting the most common element.
1817     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1818 
1819     DenseSet<SDValue> Processed{DominantValue};
1820     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1821     for (const auto &OpIdx : enumerate(Op->ops())) {
1822       const SDValue &V = OpIdx.value();
1823       if (V.isUndef() || !Processed.insert(V).second)
1824         continue;
1825       if (ValueCounts[V] == 1) {
1826         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1827                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1828       } else {
1829         // Blend in all instances of this value using a VSELECT, using a
1830         // mask where each bit signals whether that element is the one
1831         // we're after.
1832         SmallVector<SDValue> Ops;
1833         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1834           return DAG.getConstant(V == V1, DL, XLenVT);
1835         });
1836         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1837                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1838                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1839       }
1840     }
1841 
1842     return Vec;
1843   }
1844 
1845   return SDValue();
1846 }
1847 
1848 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1849                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1850   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1851     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1852     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi half is all copies of Lo's sign bit (i.e. the pair is the
    // sign-extension of Lo), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
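    // For example, splatting the i64 constant -1 on RV32 gives Lo == Hi == -1,
    // so a single VMV_V_X_VL of Lo suffices.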
1855     if ((LoC >> 31) == HiC)
1856       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1857   }
1858 
1859   // Fall back to a stack store and stride x0 vector load.
1860   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1861 }
1862 
1863 // Called by type legalization to handle splat of i64 on RV32.
1864 // FIXME: We can optimize this when the type has sign or zero bits in one
1865 // of the halves.
1866 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1867                                    SDValue VL, SelectionDAG &DAG) {
1868   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1869   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1870                            DAG.getConstant(0, DL, MVT::i32));
1871   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1872                            DAG.getConstant(1, DL, MVT::i32));
1873   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1874 }
1875 
1876 // This function lowers a splat of a scalar operand Splat with the vector
1877 // length VL. It ensures the final sequence is type legal, which is useful when
1878 // lowering a splat after type legalization.
1879 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1880                                 SelectionDAG &DAG,
1881                                 const RISCVSubtarget &Subtarget) {
1882   if (VT.isFloatingPoint())
1883     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1884 
1885   MVT XLenVT = Subtarget.getXLenVT();
1886 
1887   // Simplest case is that the operand needs to be promoted to XLenVT.
1888   if (Scalar.getValueType().bitsLE(XLenVT)) {
1889     // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a zero
    // extend and the simm5 check in isel would fail.
1892     // FIXME: Should we ignore the upper bits in isel instead?
1893     unsigned ExtOpc =
1894         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1895     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1896     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1897   }
1898 
1899   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1900          "Unexpected scalar for splat lowering!");
1901 
1902   // Otherwise use the more complicated splatting algorithm.
1903   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1904 }
1905 
1906 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1907                                    const RISCVSubtarget &Subtarget) {
1908   SDValue V1 = Op.getOperand(0);
1909   SDValue V2 = Op.getOperand(1);
1910   SDLoc DL(Op);
1911   MVT XLenVT = Subtarget.getXLenVT();
1912   MVT VT = Op.getSimpleValueType();
1913   unsigned NumElts = VT.getVectorNumElements();
1914   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1915 
1916   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1917 
1918   SDValue TrueMask, VL;
1919   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1920 
1921   if (SVN->isSplat()) {
1922     const int Lane = SVN->getSplatIndex();
1923     if (Lane >= 0) {
1924       MVT SVT = VT.getVectorElementType();
1925 
1926       // Turn splatted vector load into a strided load with an X0 stride.
1927       SDValue V = V1;
1928       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1929       // with undef.
1930       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1931       int Offset = Lane;
1932       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1933         int OpElements =
1934             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1935         V = V.getOperand(Offset / OpElements);
1936         Offset %= OpElements;
1937       }
1938 
1939       // We need to ensure the load isn't atomic or volatile.
1940       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1941         auto *Ld = cast<LoadSDNode>(V);
1942         Offset *= SVT.getStoreSize();
1943         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1944                                                    TypeSize::Fixed(Offset), DL);
1945 
1946         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1947         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1948           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1949           SDValue IntID =
1950               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1951           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1952                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1953           SDValue NewLoad = DAG.getMemIntrinsicNode(
1954               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1955               DAG.getMachineFunction().getMachineMemOperand(
1956                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1957           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1958           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1959         }
1960 
1961         // Otherwise use a scalar load and splat. This will give the best
1962         // opportunity to fold a splat into the operation. ISel can turn it into
1963         // the x0 strided load if we aren't able to fold away the select.
1964         if (SVT.isFloatingPoint())
1965           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1966                           Ld->getPointerInfo().getWithOffset(Offset),
1967                           Ld->getOriginalAlign(),
1968                           Ld->getMemOperand()->getFlags());
1969         else
1970           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1971                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1972                              Ld->getOriginalAlign(),
1973                              Ld->getMemOperand()->getFlags());
1974         DAG.makeEquivalentMemoryOrdering(Ld, V);
1975 
1976         unsigned Opc =
1977             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1978         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1979         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1980       }
1981 
1982       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1983       assert(Lane < (int)NumElts && "Unexpected lane!");
1984       SDValue Gather =
1985           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1986                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1987       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1988     }
1989   }
1990 
1991   // Detect shuffles which can be re-expressed as vector selects; these are
1992   // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either of the source vectors.
1994   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1995     int MaskIndex = MaskIdx.value();
1996     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1997   });
1998 
1999   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2000 
2001   SmallVector<SDValue> MaskVals;
2002   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2003   // merged with a second vrgather.
2004   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2005 
2006   // By default we preserve the original operand order, and use a mask to
2007   // select LHS as true and RHS as false. However, since RVV vector selects may
2008   // feature splats but only on the LHS, we may choose to invert our mask and
2009   // instead select between RHS and LHS.
2010   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2011   bool InvertMask = IsSelect == SwapOps;
2012 
2013   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2014   // half.
2015   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2016 
2017   // Now construct the mask that will be used by the vselect or blended
2018   // vrgather operation. For vrgathers, construct the appropriate indices into
2019   // each vector.
2020   for (int MaskIndex : SVN->getMask()) {
2021     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2022     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2023     if (!IsSelect) {
2024       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2025       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2026                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2027                                      : DAG.getUNDEF(XLenVT));
2028       GatherIndicesRHS.push_back(
2029           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2030                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2031       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2032         ++LHSIndexCounts[MaskIndex];
2033       if (!IsLHSOrUndefIndex)
2034         ++RHSIndexCounts[MaskIndex - NumElts];
2035     }
2036   }
2037 
2038   if (SwapOps) {
2039     std::swap(V1, V2);
2040     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2041   }
2042 
2043   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2044   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2045   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2046 
2047   if (IsSelect)
2048     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2049 
2050   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2051     // On such a large vector we're unable to use i8 as the index type.
2052     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2053     // may involve vector splitting if we're already at LMUL=8, or our
2054     // user-supplied maximum fixed-length LMUL.
2055     return SDValue();
2056   }
2057 
2058   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2059   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2060   MVT IndexVT = VT.changeTypeToInteger();
2061   // Since we can't introduce illegal index types at this stage, use i16 and
2062   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2063   // than XLenVT.
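  // For example, for a v4i64 shuffle on RV32 a plain vrgather would need i64
  // indices, so we use v4i16 indices and vrgatherei16 instead.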
2064   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2065     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2066     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2067   }
2068 
2069   MVT IndexContainerVT =
2070       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2071 
2072   SDValue Gather;
2073   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2074   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2075   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2076     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2077   } else {
2078     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2079     // If only one index is used, we can use a "splat" vrgather.
2080     // TODO: We can splat the most-common index and fix-up any stragglers, if
2081     // that's beneficial.
2082     if (LHSIndexCounts.size() == 1) {
2083       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2084       Gather =
2085           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2086                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2087     } else {
2088       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2089       LHSIndices =
2090           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2091 
2092       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2093                            TrueMask, VL);
2094     }
2095   }
2096 
2097   // If a second vector operand is used by this shuffle, blend it in with an
2098   // additional vrgather.
2099   if (!V2.isUndef()) {
2100     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2101     // If only one index is used, we can use a "splat" vrgather.
2102     // TODO: We can splat the most-common index and fix-up any stragglers, if
2103     // that's beneficial.
2104     if (RHSIndexCounts.size() == 1) {
2105       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2106       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2107                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2108     } else {
2109       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2110       RHSIndices =
2111           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2112       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2113                        VL);
2114     }
2115 
2116     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2117     SelectMask =
2118         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2119 
2120     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2121                          Gather, VL);
2122   }
2123 
2124   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2125 }
2126 
2127 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2128                                      SDLoc DL, SelectionDAG &DAG,
2129                                      const RISCVSubtarget &Subtarget) {
2130   if (VT.isScalableVector())
2131     return DAG.getFPExtendOrRound(Op, DL, VT);
2132   assert(VT.isFixedLengthVector() &&
2133          "Unexpected value type for RVV FP extend/round lowering");
2134   SDValue Mask, VL;
2135   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2136   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2137                         ? RISCVISD::FP_EXTEND_VL
2138                         : RISCVISD::FP_ROUND_VL;
2139   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2140 }
2141 
2142 // While RVV has alignment restrictions, we should always be able to load as a
2143 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2145 // the load is already correctly-aligned, it returns SDValue().
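// For example, an under-aligned v4i32 load is performed as a v16i8 load and
// the result is bitcast back to v4i32.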
2146 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2147                                                     SelectionDAG &DAG) const {
2148   auto *Load = cast<LoadSDNode>(Op);
2149   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2150 
2151   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2152                                      Load->getMemoryVT(),
2153                                      *Load->getMemOperand()))
2154     return SDValue();
2155 
2156   SDLoc DL(Op);
2157   MVT VT = Op.getSimpleValueType();
2158   unsigned EltSizeBits = VT.getScalarSizeInBits();
2159   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2160          "Unexpected unaligned RVV load type");
2161   MVT NewVT =
2162       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2163   assert(NewVT.isValid() &&
2164          "Expecting equally-sized RVV vector types to be legal");
2165   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2166                           Load->getPointerInfo(), Load->getOriginalAlign(),
2167                           Load->getMemOperand()->getFlags());
2168   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2169 }
2170 
2171 // While RVV has alignment restrictions, we should always be able to store as a
2172 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
2174 // returns SDValue() if the store is already correctly aligned.
2175 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2176                                                      SelectionDAG &DAG) const {
2177   auto *Store = cast<StoreSDNode>(Op);
2178   assert(Store && Store->getValue().getValueType().isVector() &&
2179          "Expected vector store");
2180 
2181   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2182                                      Store->getMemoryVT(),
2183                                      *Store->getMemOperand()))
2184     return SDValue();
2185 
2186   SDLoc DL(Op);
2187   SDValue StoredVal = Store->getValue();
2188   MVT VT = StoredVal.getSimpleValueType();
2189   unsigned EltSizeBits = VT.getScalarSizeInBits();
2190   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2191          "Unexpected unaligned RVV store type");
2192   MVT NewVT =
2193       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2194   assert(NewVT.isValid() &&
2195          "Expecting equally-sized RVV vector types to be legal");
2196   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2197   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2198                       Store->getPointerInfo(), Store->getOriginalAlign(),
2199                       Store->getMemOperand()->getFlags());
2200 }
2201 
2202 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2203                                             SelectionDAG &DAG) const {
2204   switch (Op.getOpcode()) {
2205   default:
2206     report_fatal_error("unimplemented operand");
2207   case ISD::GlobalAddress:
2208     return lowerGlobalAddress(Op, DAG);
2209   case ISD::BlockAddress:
2210     return lowerBlockAddress(Op, DAG);
2211   case ISD::ConstantPool:
2212     return lowerConstantPool(Op, DAG);
2213   case ISD::JumpTable:
2214     return lowerJumpTable(Op, DAG);
2215   case ISD::GlobalTLSAddress:
2216     return lowerGlobalTLSAddress(Op, DAG);
2217   case ISD::SELECT:
2218     return lowerSELECT(Op, DAG);
2219   case ISD::BRCOND:
2220     return lowerBRCOND(Op, DAG);
2221   case ISD::VASTART:
2222     return lowerVASTART(Op, DAG);
2223   case ISD::FRAMEADDR:
2224     return lowerFRAMEADDR(Op, DAG);
2225   case ISD::RETURNADDR:
2226     return lowerRETURNADDR(Op, DAG);
2227   case ISD::SHL_PARTS:
2228     return lowerShiftLeftParts(Op, DAG);
2229   case ISD::SRA_PARTS:
2230     return lowerShiftRightParts(Op, DAG, true);
2231   case ISD::SRL_PARTS:
2232     return lowerShiftRightParts(Op, DAG, false);
2233   case ISD::BITCAST: {
2234     SDLoc DL(Op);
2235     EVT VT = Op.getValueType();
2236     SDValue Op0 = Op.getOperand(0);
2237     EVT Op0VT = Op0.getValueType();
2238     MVT XLenVT = Subtarget.getXLenVT();
2239     if (VT.isFixedLengthVector()) {
2240       // We can handle fixed length vector bitcasts with a simple replacement
2241       // in isel.
2242       if (Op0VT.isFixedLengthVector())
2243         return Op;
2244       // When bitcasting from scalar to fixed-length vector, insert the scalar
2245       // into a one-element vector of the result type, and perform a vector
2246       // bitcast.
2247       if (!Op0VT.isVector()) {
2248         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2249         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2250                                               DAG.getUNDEF(BVT), Op0,
2251                                               DAG.getConstant(0, DL, XLenVT)));
2252       }
2253       return SDValue();
2254     }
2255     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2256     // thus: bitcast the vector to a one-element vector type whose element type
2257     // is the same as the result type, and extract the first element.
2258     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2259       LLVMContext &Context = *DAG.getContext();
2260       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2261       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2262                          DAG.getConstant(0, DL, XLenVT));
2263     }
2264     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2265       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2266       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2267       return FPConv;
2268     }
2269     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2270         Subtarget.hasStdExtF()) {
2271       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2272       SDValue FPConv =
2273           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2274       return FPConv;
2275     }
2276     return SDValue();
2277   }
2278   case ISD::INTRINSIC_WO_CHAIN:
2279     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2280   case ISD::INTRINSIC_W_CHAIN:
2281     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2282   case ISD::BSWAP:
2283   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2285     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2286     MVT VT = Op.getSimpleValueType();
2287     SDLoc DL(Op);
2288     // Start with the maximum immediate value which is the bitwidth - 1.
2289     unsigned Imm = VT.getSizeInBits() - 1;
2290     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2291     if (Op.getOpcode() == ISD::BSWAP)
2292       Imm &= ~0x7U;
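    // For example, a 32-bit BSWAP becomes GREV with immediate 24, and a 32-bit
    // BITREVERSE becomes GREV with immediate 31.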
2293     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2294                        DAG.getConstant(Imm, DL, VT));
2295   }
2296   case ISD::FSHL:
2297   case ISD::FSHR: {
2298     MVT VT = Op.getSimpleValueType();
2299     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2300     SDLoc DL(Op);
2301     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2302       return Op;
2303     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
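    // For example, on RV64 the mask is 63, keeping only the low 6 bits of the
    // shift amount.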
2305     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2306     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2307                                 DAG.getConstant(ShAmtWidth, DL, VT));
2308     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2309     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2310   }
2311   case ISD::TRUNCATE: {
2312     SDLoc DL(Op);
2313     MVT VT = Op.getSimpleValueType();
2314     // Only custom-lower vector truncates
2315     if (!VT.isVector())
2316       return Op;
2317 
2318     // Truncates to mask types are handled differently
2319     if (VT.getVectorElementType() == MVT::i1)
2320       return lowerVectorMaskTrunc(Op, DAG);
2321 
2322     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2323     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2324     // truncate by one power of two at a time.
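    // For example, an i64->i8 element truncate is emitted as three such nodes:
    // i64->i32, i32->i16, i16->i8.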
2325     MVT DstEltVT = VT.getVectorElementType();
2326 
2327     SDValue Src = Op.getOperand(0);
2328     MVT SrcVT = Src.getSimpleValueType();
2329     MVT SrcEltVT = SrcVT.getVectorElementType();
2330 
2331     assert(DstEltVT.bitsLT(SrcEltVT) &&
2332            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2333            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2334            "Unexpected vector truncate lowering");
2335 
2336     MVT ContainerVT = SrcVT;
2337     if (SrcVT.isFixedLengthVector()) {
2338       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2339       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2340     }
2341 
2342     SDValue Result = Src;
2343     SDValue Mask, VL;
2344     std::tie(Mask, VL) =
2345         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2346     LLVMContext &Context = *DAG.getContext();
2347     const ElementCount Count = ContainerVT.getVectorElementCount();
2348     do {
2349       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2350       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2351       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2352                            Mask, VL);
2353     } while (SrcEltVT != DstEltVT);
2354 
2355     if (SrcVT.isFixedLengthVector())
2356       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2357 
2358     return Result;
2359   }
2360   case ISD::ANY_EXTEND:
2361   case ISD::ZERO_EXTEND:
2362     if (Op.getOperand(0).getValueType().isVector() &&
2363         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2364       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2365     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2366   case ISD::SIGN_EXTEND:
2367     if (Op.getOperand(0).getValueType().isVector() &&
2368         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2369       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2370     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2371   case ISD::SPLAT_VECTOR_PARTS:
2372     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2373   case ISD::INSERT_VECTOR_ELT:
2374     return lowerINSERT_VECTOR_ELT(Op, DAG);
2375   case ISD::EXTRACT_VECTOR_ELT:
2376     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2377   case ISD::VSCALE: {
2378     MVT VT = Op.getSimpleValueType();
2379     SDLoc DL(Op);
2380     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2381     // We define our scalable vector types for lmul=1 to use a 64 bit known
2382     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2383     // vscale as VLENB / 8.
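    // For example, vscale * 4 is lowered as VLENB >> 1, and vscale * 16 as
    // VLENB << 1.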
2384     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2385     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2386       // We assume VLENB is a multiple of 8. We manually choose the best shift
2387       // here because SimplifyDemandedBits isn't always able to simplify it.
2388       uint64_t Val = Op.getConstantOperandVal(0);
2389       if (isPowerOf2_64(Val)) {
2390         uint64_t Log2 = Log2_64(Val);
2391         if (Log2 < 3)
2392           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2393                              DAG.getConstant(3 - Log2, DL, VT));
2394         if (Log2 > 3)
2395           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2396                              DAG.getConstant(Log2 - 3, DL, VT));
2397         return VLENB;
2398       }
2399       // If the multiplier is a multiple of 8, scale it down to avoid needing
2400       // to shift the VLENB value.
2401       if ((Val % 8) == 0)
2402         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2403                            DAG.getConstant(Val / 8, DL, VT));
2404     }
2405 
2406     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2407                                  DAG.getConstant(3, DL, VT));
2408     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2409   }
2410   case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
2412     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2413     // via f32.
2414     SDLoc DL(Op);
2415     MVT VT = Op.getSimpleValueType();
2416     SDValue Src = Op.getOperand(0);
2417     MVT SrcVT = Src.getSimpleValueType();
2418 
2419     // Prepare any fixed-length vector operands.
2420     MVT ContainerVT = VT;
2421     if (SrcVT.isFixedLengthVector()) {
2422       ContainerVT = getContainerForFixedLengthVector(VT);
2423       MVT SrcContainerVT =
2424           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2425       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2426     }
2427 
2428     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2429         SrcVT.getVectorElementType() != MVT::f16) {
2430       // For scalable vectors, we only need to close the gap between
2431       // vXf16->vXf64.
2432       if (!VT.isFixedLengthVector())
2433         return Op;
2434       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2435       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2436       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2437     }
2438 
2439     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2440     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2441     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2442         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2443 
2444     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2445                                            DL, DAG, Subtarget);
2446     if (VT.isFixedLengthVector())
2447       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2448     return Extend;
2449   }
2450   case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
2452     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2453     // conversion instruction.
2454     SDLoc DL(Op);
2455     MVT VT = Op.getSimpleValueType();
2456     SDValue Src = Op.getOperand(0);
2457     MVT SrcVT = Src.getSimpleValueType();
2458 
2459     // Prepare any fixed-length vector operands.
2460     MVT ContainerVT = VT;
2461     if (VT.isFixedLengthVector()) {
2462       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2463       ContainerVT =
2464           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2465       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2466     }
2467 
2468     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2469         SrcVT.getVectorElementType() != MVT::f64) {
2470       // For scalable vectors, we only need to close the gap between
2471       // vXf64<->vXf16.
2472       if (!VT.isFixedLengthVector())
2473         return Op;
2474       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2475       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2476       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2477     }
2478 
2479     SDValue Mask, VL;
2480     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2481 
2482     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2483     SDValue IntermediateRound =
2484         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2485     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2486                                           DL, DAG, Subtarget);
2487 
2488     if (VT.isFixedLengthVector())
2489       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2490     return Round;
2491   }
2492   case ISD::FP_TO_SINT:
2493   case ISD::FP_TO_UINT:
2494   case ISD::SINT_TO_FP:
2495   case ISD::UINT_TO_FP: {
2496     // RVV can only do fp<->int conversions to types half/double the size of
2497     // the source. Any conversion that would otherwise need two hops is
2498     // custom-lowered here into a sequence of single-hop operations.
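    // For example (assuming the element types involved are legal for the
    // subtarget), v4i8 -> v4f32 is lowered as a sign/zero extend to v4i32
    // followed by a same-width v4i32 -> v4f32 conversion, while v4f64 -> v4i8
    // becomes a narrowing v4f64 -> v4i32 conversion followed by a truncate.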
2499     MVT VT = Op.getSimpleValueType();
2500     if (!VT.isVector())
2501       return Op;
2502     SDLoc DL(Op);
2503     SDValue Src = Op.getOperand(0);
2504     MVT EltVT = VT.getVectorElementType();
2505     MVT SrcVT = Src.getSimpleValueType();
2506     MVT SrcEltVT = SrcVT.getVectorElementType();
2507     unsigned EltSize = EltVT.getSizeInBits();
2508     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2509     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2510            "Unexpected vector element types");
2511 
2512     bool IsInt2FP = SrcEltVT.isInteger();
2513     // Widening conversions
2514     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2515       if (IsInt2FP) {
2516         // Do a regular integer sign/zero extension then convert to float.
2517         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2518                                       VT.getVectorElementCount());
2519         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2520                                  ? ISD::ZERO_EXTEND
2521                                  : ISD::SIGN_EXTEND;
2522         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2523         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2524       }
2525       // FP2Int
2526       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2527       // Do one doubling fp_extend then complete the operation by converting
2528       // to int.
2529       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2530       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2531       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2532     }
2533 
2534     // Narrowing conversions
2535     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2536       if (IsInt2FP) {
2537         // One narrowing int_to_fp, then an fp_round.
2538         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2539         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2540         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2541         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2542       }
2543       // FP2Int
2544       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2545       // representable by the integer, the result is poison.
2546       MVT IVecVT =
2547           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2548                            VT.getVectorElementCount());
2549       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2550       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2551     }
2552 
2553     // Scalable vectors can exit here: isel patterns handle same-width
2554     // conversions as well as the halving/doubling ones.
2555     if (!VT.isFixedLengthVector())
2556       return Op;
2557 
2558     // For fixed-length vectors we lower to a custom "VL" node.
2559     unsigned RVVOpc = 0;
2560     switch (Op.getOpcode()) {
2561     default:
2562       llvm_unreachable("Impossible opcode");
2563     case ISD::FP_TO_SINT:
2564       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2565       break;
2566     case ISD::FP_TO_UINT:
2567       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2568       break;
2569     case ISD::SINT_TO_FP:
2570       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2571       break;
2572     case ISD::UINT_TO_FP:
2573       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2574       break;
2575     }
2576 
2577     MVT ContainerVT, SrcContainerVT;
2578     // Derive the reference container type from the larger vector type.
2579     if (SrcEltSize > EltSize) {
2580       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2581       ContainerVT =
2582           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2583     } else {
2584       ContainerVT = getContainerForFixedLengthVector(VT);
2585       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2586     }
2587 
2588     SDValue Mask, VL;
2589     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2590 
2591     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2592     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2593     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2594   }
2595   case ISD::FP_TO_SINT_SAT:
2596   case ISD::FP_TO_UINT_SAT:
2597     return lowerFP_TO_INT_SAT(Op, DAG);
2598   case ISD::VECREDUCE_ADD:
2599   case ISD::VECREDUCE_UMAX:
2600   case ISD::VECREDUCE_SMAX:
2601   case ISD::VECREDUCE_UMIN:
2602   case ISD::VECREDUCE_SMIN:
2603     return lowerVECREDUCE(Op, DAG);
2604   case ISD::VECREDUCE_AND:
2605   case ISD::VECREDUCE_OR:
2606   case ISD::VECREDUCE_XOR:
2607     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2608       return lowerVectorMaskVECREDUCE(Op, DAG);
2609     return lowerVECREDUCE(Op, DAG);
2610   case ISD::VECREDUCE_FADD:
2611   case ISD::VECREDUCE_SEQ_FADD:
2612   case ISD::VECREDUCE_FMIN:
2613   case ISD::VECREDUCE_FMAX:
2614     return lowerFPVECREDUCE(Op, DAG);
2615   case ISD::INSERT_SUBVECTOR:
2616     return lowerINSERT_SUBVECTOR(Op, DAG);
2617   case ISD::EXTRACT_SUBVECTOR:
2618     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2619   case ISD::STEP_VECTOR:
2620     return lowerSTEP_VECTOR(Op, DAG);
2621   case ISD::VECTOR_REVERSE:
2622     return lowerVECTOR_REVERSE(Op, DAG);
2623   case ISD::BUILD_VECTOR:
2624     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2625   case ISD::SPLAT_VECTOR:
2626     if (Op.getValueType().getVectorElementType() == MVT::i1)
2627       return lowerVectorMaskSplat(Op, DAG);
2628     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2629   case ISD::VECTOR_SHUFFLE:
2630     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2631   case ISD::CONCAT_VECTORS: {
2632     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2633     // better than going through the stack, as the default expansion does.
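    // For example, (concat_vectors v4i32:a, v4i32:b) becomes
    //   (insert_subvector (insert_subvector undef, a, 0), b, 4)
    // and each insert_subvector can then be lowered without a trip through
    // memory.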
2634     SDLoc DL(Op);
2635     MVT VT = Op.getSimpleValueType();
2636     unsigned NumOpElts =
2637         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2638     SDValue Vec = DAG.getUNDEF(VT);
2639     for (const auto &OpIdx : enumerate(Op->ops()))
2640       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2641                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2642     return Vec;
2643   }
2644   case ISD::LOAD:
2645     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2646       return V;
2647     if (Op.getValueType().isFixedLengthVector())
2648       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2649     return Op;
2650   case ISD::STORE:
2651     if (auto V = expandUnalignedRVVStore(Op, DAG))
2652       return V;
2653     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2654       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2655     return Op;
2656   case ISD::MLOAD:
2657   case ISD::VP_LOAD:
2658     return lowerMaskedLoad(Op, DAG);
2659   case ISD::MSTORE:
2660   case ISD::VP_STORE:
2661     return lowerMaskedStore(Op, DAG);
2662   case ISD::SETCC:
2663     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2664   case ISD::ADD:
2665     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2666   case ISD::SUB:
2667     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2668   case ISD::MUL:
2669     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2670   case ISD::MULHS:
2671     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2672   case ISD::MULHU:
2673     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2674   case ISD::AND:
2675     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2676                                               RISCVISD::AND_VL);
2677   case ISD::OR:
2678     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2679                                               RISCVISD::OR_VL);
2680   case ISD::XOR:
2681     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2682                                               RISCVISD::XOR_VL);
2683   case ISD::SDIV:
2684     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2685   case ISD::SREM:
2686     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2687   case ISD::UDIV:
2688     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2689   case ISD::UREM:
2690     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2691   case ISD::SHL:
2692   case ISD::SRA:
2693   case ISD::SRL:
2694     if (Op.getSimpleValueType().isFixedLengthVector())
2695       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2696     // This can be called for an i32 shift amount that needs to be promoted.
2697     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2698            "Unexpected custom legalisation");
2699     return SDValue();
2700   case ISD::SADDSAT:
2701     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
2702   case ISD::UADDSAT:
2703     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
2704   case ISD::SSUBSAT:
2705     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
2706   case ISD::USUBSAT:
2707     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
2708   case ISD::FADD:
2709     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2710   case ISD::FSUB:
2711     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2712   case ISD::FMUL:
2713     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2714   case ISD::FDIV:
2715     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2716   case ISD::FNEG:
2717     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2718   case ISD::FABS:
2719     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2720   case ISD::FSQRT:
2721     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2722   case ISD::FMA:
2723     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2724   case ISD::SMIN:
2725     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2726   case ISD::SMAX:
2727     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2728   case ISD::UMIN:
2729     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2730   case ISD::UMAX:
2731     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2732   case ISD::FMINNUM:
2733     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2734   case ISD::FMAXNUM:
2735     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2736   case ISD::ABS:
2737     return lowerABS(Op, DAG);
2738   case ISD::VSELECT:
2739     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2740   case ISD::FCOPYSIGN:
2741     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2742   case ISD::MGATHER:
2743   case ISD::VP_GATHER:
2744     return lowerMaskedGather(Op, DAG);
2745   case ISD::MSCATTER:
2746   case ISD::VP_SCATTER:
2747     return lowerMaskedScatter(Op, DAG);
2748   case ISD::FLT_ROUNDS_:
2749     return lowerGET_ROUNDING(Op, DAG);
2750   case ISD::SET_ROUNDING:
2751     return lowerSET_ROUNDING(Op, DAG);
2752   case ISD::VP_ADD:
2753     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2754   case ISD::VP_SUB:
2755     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2756   case ISD::VP_MUL:
2757     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2758   case ISD::VP_SDIV:
2759     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2760   case ISD::VP_UDIV:
2761     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2762   case ISD::VP_SREM:
2763     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2764   case ISD::VP_UREM:
2765     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2766   case ISD::VP_AND:
2767     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2768   case ISD::VP_OR:
2769     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2770   case ISD::VP_XOR:
2771     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2772   case ISD::VP_ASHR:
2773     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2774   case ISD::VP_LSHR:
2775     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2776   case ISD::VP_SHL:
2777     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2778   case ISD::VP_FADD:
2779     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2780   case ISD::VP_FSUB:
2781     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2782   case ISD::VP_FMUL:
2783     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2784   case ISD::VP_FDIV:
2785     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2786   }
2787 }
2788 
2789 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2790                              SelectionDAG &DAG, unsigned Flags) {
2791   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2792 }
2793 
2794 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2795                              SelectionDAG &DAG, unsigned Flags) {
2796   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2797                                    Flags);
2798 }
2799 
2800 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2801                              SelectionDAG &DAG, unsigned Flags) {
2802   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2803                                    N->getOffset(), Flags);
2804 }
2805 
2806 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2807                              SelectionDAG &DAG, unsigned Flags) {
2808   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2809 }
2810 
2811 template <class NodeTy>
2812 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2813                                      bool IsLocal) const {
2814   SDLoc DL(N);
2815   EVT Ty = getPointerTy(DAG.getDataLayout());
2816 
2817   if (isPositionIndependent()) {
2818     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2819     if (IsLocal)
2820       // Use PC-relative addressing to access the symbol. This generates the
2821       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2822       // %pcrel_lo(auipc)).
2823       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2824 
2825     // Use PC-relative addressing to access the GOT for this symbol, then load
2826     // the address from the GOT. This generates the pattern (PseudoLA sym),
2827     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2828     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2829   }
2830 
2831   switch (getTargetMachine().getCodeModel()) {
2832   default:
2833     report_fatal_error("Unsupported code model for lowering");
2834   case CodeModel::Small: {
2835     // Generate a sequence for accessing addresses within the first 2 GiB of
2836     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
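    // In assembly this is roughly (register choice is illustrative only):
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)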
2837     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2838     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2839     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2840     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2841   }
2842   case CodeModel::Medium: {
2843     // Generate a sequence for accessing addresses within any 2GiB range within
2844     // the address space. This generates the pattern (PseudoLLA sym), which
2845     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
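    // In assembly this is roughly (label/register names are illustrative only):
    //   .Lpcrel_hi0: auipc a0, %pcrel_hi(sym)
    //                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)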
2846     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2847     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2848   }
2849   }
2850 }
2851 
2852 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2853                                                 SelectionDAG &DAG) const {
2854   SDLoc DL(Op);
2855   EVT Ty = Op.getValueType();
2856   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2857   int64_t Offset = N->getOffset();
2858   MVT XLenVT = Subtarget.getXLenVT();
2859 
2860   const GlobalValue *GV = N->getGlobal();
2861   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2862   SDValue Addr = getAddr(N, DAG, IsLocal);
2863 
2864   // In order to maximise the opportunity for common subexpression elimination,
2865   // emit a separate ADD node for the global address offset instead of folding
2866   // it in the global address node. Later peephole optimisations may choose to
2867   // fold it back in when profitable.
2868   if (Offset != 0)
2869     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2870                        DAG.getConstant(Offset, DL, XLenVT));
2871   return Addr;
2872 }
2873 
2874 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2875                                                SelectionDAG &DAG) const {
2876   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2877 
2878   return getAddr(N, DAG);
2879 }
2880 
2881 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2882                                                SelectionDAG &DAG) const {
2883   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2884 
2885   return getAddr(N, DAG);
2886 }
2887 
2888 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2889                                             SelectionDAG &DAG) const {
2890   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2891 
2892   return getAddr(N, DAG);
2893 }
2894 
2895 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2896                                               SelectionDAG &DAG,
2897                                               bool UseGOT) const {
2898   SDLoc DL(N);
2899   EVT Ty = getPointerTy(DAG.getDataLayout());
2900   const GlobalValue *GV = N->getGlobal();
2901   MVT XLenVT = Subtarget.getXLenVT();
2902 
2903   if (UseGOT) {
2904     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2905     // load the address from the GOT and add the thread pointer. This generates
2906     // the pattern (PseudoLA_TLS_IE sym), which expands to
2907     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2908     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2909     SDValue Load =
2910         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2911 
2912     // Add the thread pointer.
2913     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2914     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2915   }
2916 
2917   // Generate a sequence for accessing the address relative to the thread
2918   // pointer, with the appropriate adjustment for the thread pointer offset.
2919   // This generates the pattern
2920   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
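  // In assembly this is roughly (register choice is illustrative only):
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)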
2921   SDValue AddrHi =
2922       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2923   SDValue AddrAdd =
2924       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2925   SDValue AddrLo =
2926       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2927 
2928   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2929   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2930   SDValue MNAdd = SDValue(
2931       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2932       0);
2933   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2934 }
2935 
2936 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2937                                                SelectionDAG &DAG) const {
2938   SDLoc DL(N);
2939   EVT Ty = getPointerTy(DAG.getDataLayout());
2940   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2941   const GlobalValue *GV = N->getGlobal();
2942 
2943   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2944   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2945   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
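  // In assembly this is roughly (label/register names are illustrative only):
  //   .Lpcrel_hi0: auipc a0, %tls_gd_pcrel_hi(sym)
  //                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
  //                call  __tls_get_addr@plt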
2946   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2947   SDValue Load =
2948       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2949 
2950   // Prepare argument list to generate call.
2951   ArgListTy Args;
2952   ArgListEntry Entry;
2953   Entry.Node = Load;
2954   Entry.Ty = CallTy;
2955   Args.push_back(Entry);
2956 
2957   // Setup call to __tls_get_addr.
2958   TargetLowering::CallLoweringInfo CLI(DAG);
2959   CLI.setDebugLoc(DL)
2960       .setChain(DAG.getEntryNode())
2961       .setLibCallee(CallingConv::C, CallTy,
2962                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2963                     std::move(Args));
2964 
2965   return LowerCallTo(CLI).first;
2966 }
2967 
2968 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2969                                                    SelectionDAG &DAG) const {
2970   SDLoc DL(Op);
2971   EVT Ty = Op.getValueType();
2972   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2973   int64_t Offset = N->getOffset();
2974   MVT XLenVT = Subtarget.getXLenVT();
2975 
2976   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2977 
2978   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2979       CallingConv::GHC)
2980     report_fatal_error("In GHC calling convention TLS is not supported");
2981 
2982   SDValue Addr;
2983   switch (Model) {
2984   case TLSModel::LocalExec:
2985     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2986     break;
2987   case TLSModel::InitialExec:
2988     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2989     break;
2990   case TLSModel::LocalDynamic:
2991   case TLSModel::GeneralDynamic:
2992     Addr = getDynamicTLSAddr(N, DAG);
2993     break;
2994   }
2995 
2996   // In order to maximise the opportunity for common subexpression elimination,
2997   // emit a separate ADD node for the global address offset instead of folding
2998   // it in the global address node. Later peephole optimisations may choose to
2999   // fold it back in when profitable.
3000   if (Offset != 0)
3001     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3002                        DAG.getConstant(Offset, DL, XLenVT));
3003   return Addr;
3004 }
3005 
3006 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3007   SDValue CondV = Op.getOperand(0);
3008   SDValue TrueV = Op.getOperand(1);
3009   SDValue FalseV = Op.getOperand(2);
3010   SDLoc DL(Op);
3011   MVT VT = Op.getSimpleValueType();
3012   MVT XLenVT = Subtarget.getXLenVT();
3013 
3014   // Lower vector SELECTs to VSELECTs by splatting the condition.
3015   if (VT.isVector()) {
3016     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3017     SDValue CondSplat = VT.isScalableVector()
3018                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3019                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3020     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3021   }
3022 
3023   // If the result type is XLenVT and CondV is the output of a SETCC node
3024   // which also operated on XLenVT inputs, then merge the SETCC node into the
3025   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3026   // compare+branch instructions. i.e.:
3027   // (select (setcc lhs, rhs, cc), truev, falsev)
3028   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3029   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3030       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3031     SDValue LHS = CondV.getOperand(0);
3032     SDValue RHS = CondV.getOperand(1);
3033     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3034     ISD::CondCode CCVal = CC->get();
3035 
3036     // Special case for a select of 2 constants that have a difference of 1.
3037     // Normally this is done by DAGCombine, but if the select is introduced by
3038     // type legalization or op legalization, we miss it. Restricting to SETLT
3039     // case for now because that is what signed saturating add/sub need.
3040     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3041     // but we would probably want to swap the true/false values if the condition
3042     // is SETGE/SETLE to avoid an XORI.
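    // For example, (select (setlt a, b), 4, 3) becomes (add (setlt a, b), 3)
    // and (select (setlt a, b), 3, 4) becomes (sub 4, (setlt a, b)), relying on
    // the setcc result being 0 or 1.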
3043     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3044         CCVal == ISD::SETLT) {
3045       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3046       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3047       if (TrueVal - 1 == FalseVal)
3048         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3049       if (TrueVal + 1 == FalseVal)
3050         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3051     }
3052 
3053     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3054 
3055     SDValue TargetCC = DAG.getCondCode(CCVal);
3056     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3057     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3058   }
3059 
3060   // Otherwise:
3061   // (select condv, truev, falsev)
3062   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3063   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3064   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3065 
3066   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3067 
3068   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3069 }
3070 
3071 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3072   SDValue CondV = Op.getOperand(1);
3073   SDLoc DL(Op);
3074   MVT XLenVT = Subtarget.getXLenVT();
3075 
3076   if (CondV.getOpcode() == ISD::SETCC &&
3077       CondV.getOperand(0).getValueType() == XLenVT) {
3078     SDValue LHS = CondV.getOperand(0);
3079     SDValue RHS = CondV.getOperand(1);
3080     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3081 
3082     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3083 
3084     SDValue TargetCC = DAG.getCondCode(CCVal);
3085     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3086                        LHS, RHS, TargetCC, Op.getOperand(2));
3087   }
3088 
3089   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3090                      CondV, DAG.getConstant(0, DL, XLenVT),
3091                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3092 }
3093 
3094 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3095   MachineFunction &MF = DAG.getMachineFunction();
3096   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3097 
3098   SDLoc DL(Op);
3099   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3100                                  getPointerTy(MF.getDataLayout()));
3101 
3102   // vastart just stores the address of the VarArgsFrameIndex slot into the
3103   // memory location argument.
3104   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3105   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3106                       MachinePointerInfo(SV));
3107 }
3108 
3109 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3110                                             SelectionDAG &DAG) const {
3111   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3112   MachineFunction &MF = DAG.getMachineFunction();
3113   MachineFrameInfo &MFI = MF.getFrameInfo();
3114   MFI.setFrameAddressIsTaken(true);
3115   Register FrameReg = RI.getFrameRegister(MF);
3116   int XLenInBytes = Subtarget.getXLen() / 8;
3117 
3118   EVT VT = Op.getValueType();
3119   SDLoc DL(Op);
3120   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3121   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
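  // This walk assumes every frame in the chain was built with a frame pointer,
  // with the caller's frame pointer saved at (frame pointer - 2 * XLenInBytes);
  // each loop iteration loads the previous frame pointer from that slot.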
3122   while (Depth--) {
3123     int Offset = -(XLenInBytes * 2);
3124     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3125                               DAG.getIntPtrConstant(Offset, DL));
3126     FrameAddr =
3127         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3128   }
3129   return FrameAddr;
3130 }
3131 
3132 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3133                                              SelectionDAG &DAG) const {
3134   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3135   MachineFunction &MF = DAG.getMachineFunction();
3136   MachineFrameInfo &MFI = MF.getFrameInfo();
3137   MFI.setReturnAddressIsTaken(true);
3138   MVT XLenVT = Subtarget.getXLenVT();
3139   int XLenInBytes = Subtarget.getXLen() / 8;
3140 
3141   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3142     return SDValue();
3143 
3144   EVT VT = Op.getValueType();
3145   SDLoc DL(Op);
3146   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3147   if (Depth) {
3148     int Off = -XLenInBytes;
3149     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3150     SDValue Offset = DAG.getConstant(Off, DL, VT);
3151     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3152                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3153                        MachinePointerInfo());
3154   }
3155 
3156   // Return the value of the return address register, marking it an implicit
3157   // live-in.
3158   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3159   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3160 }
3161 
3162 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3163                                                  SelectionDAG &DAG) const {
3164   SDLoc DL(Op);
3165   SDValue Lo = Op.getOperand(0);
3166   SDValue Hi = Op.getOperand(1);
3167   SDValue Shamt = Op.getOperand(2);
3168   EVT VT = Lo.getValueType();
3169 
3170   // if Shamt-XLEN < 0: // Shamt < XLEN
3171   //   Lo = Lo << Shamt
3172   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3173   // else:
3174   //   Lo = 0
3175   //   Hi = Lo << (Shamt-XLEN)
3176 
3177   SDValue Zero = DAG.getConstant(0, DL, VT);
3178   SDValue One = DAG.getConstant(1, DL, VT);
3179   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3180   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3181   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3182   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3183 
3184   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3185   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3186   SDValue ShiftRightLo =
3187       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3188   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3189   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3190   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3191 
3192   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3193 
3194   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3195   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3196 
3197   SDValue Parts[2] = {Lo, Hi};
3198   return DAG.getMergeValues(Parts, DL);
3199 }
3200 
3201 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3202                                                   bool IsSRA) const {
3203   SDLoc DL(Op);
3204   SDValue Lo = Op.getOperand(0);
3205   SDValue Hi = Op.getOperand(1);
3206   SDValue Shamt = Op.getOperand(2);
3207   EVT VT = Lo.getValueType();
3208 
3209   // SRA expansion:
3210   //   if Shamt-XLEN < 0: // Shamt < XLEN
3211   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3212   //     Hi = Hi >>s Shamt
3213   //   else:
3214   //     Lo = Hi >>s (Shamt-XLEN);
3215   //     Hi = Hi >>s (XLEN-1)
3216   //
3217   // SRL expansion:
3218   //   if Shamt-XLEN < 0: // Shamt < XLEN
3219   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3220   //     Hi = Hi >>u Shamt
3221   //   else:
3222   //     Lo = Hi >>u (Shamt-XLEN);
3223   //     Hi = 0;
3224 
3225   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3226 
3227   SDValue Zero = DAG.getConstant(0, DL, VT);
3228   SDValue One = DAG.getConstant(1, DL, VT);
3229   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3230   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3231   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3232   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3233 
3234   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3235   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3236   SDValue ShiftLeftHi =
3237       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3238   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3239   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3240   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3241   SDValue HiFalse =
3242       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3243 
3244   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3245 
3246   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3247   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3248 
3249   SDValue Parts[2] = {Lo, Hi};
3250   return DAG.getMergeValues(Parts, DL);
3251 }
3252 
3253 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3254 // legal equivalently-sized i8 type, so we can use that as a go-between.
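// For example, splatting an i1 value %x into v8i1 is conceptually lowered as
//   (setcc (splat_vector i8 (and %x, 1)), (splat_vector i8 0), ne)
// while all-ones/all-zeros splats are lowered directly to VMSET_VL/VMCLR_VL.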
3255 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3256                                                   SelectionDAG &DAG) const {
3257   SDLoc DL(Op);
3258   MVT VT = Op.getSimpleValueType();
3259   SDValue SplatVal = Op.getOperand(0);
3260   // All-zeros or all-ones splats are handled specially.
3261   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3262     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3263     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3264   }
3265   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3266     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3267     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3268   }
3269   MVT XLenVT = Subtarget.getXLenVT();
3270   assert(SplatVal.getValueType() == XLenVT &&
3271          "Unexpected type for i1 splat value");
3272   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3273   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3274                          DAG.getConstant(1, DL, XLenVT));
3275   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3276   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3277   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3278 }
3279 
3280 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3281 // illegal (currently only vXi64 RV32).
3282 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3283 // them to SPLAT_VECTOR_I64
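// For example, on RV32 splatting the 64-bit constant -1 (Lo = Hi = 0xffffffff)
// can use SPLAT_VECTOR_I64 on Lo alone, whereas a pair such as Lo = 0, Hi = 1
// is not a sign-extended 32-bit value and must take one of the paths below.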
3284 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3285                                                      SelectionDAG &DAG) const {
3286   SDLoc DL(Op);
3287   MVT VecVT = Op.getSimpleValueType();
3288   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3289          "Unexpected SPLAT_VECTOR_PARTS lowering");
3290 
3291   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3292   SDValue Lo = Op.getOperand(0);
3293   SDValue Hi = Op.getOperand(1);
3294 
3295   if (VecVT.isFixedLengthVector()) {
3296     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3297     SDLoc DL(Op);
3298     SDValue Mask, VL;
3299     std::tie(Mask, VL) =
3300         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3301 
3302     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3303     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3304   }
3305 
3306   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3307     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3308     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3309     // If the Hi constant is just Lo's sign bit replicated (a sign-extended
3310     // 32-bit value), lower to a custom node to match RVV vector/scalar forms.
3311     if ((LoC >> 31) == HiC)
3312       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3313   }
3314 
3315   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3316   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3317       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3318       Hi.getConstantOperandVal(1) == 31)
3319     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3320 
3321   // Fall back to a stack store and a stride-x0 vector load. Use X0 as the VL.
3322   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3323                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3324 }
3325 
3326 // Custom-lower extensions from mask vectors by using a vselect either with 1
3327 // for zero/any-extension or -1 for sign-extension:
3328 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3329 // Note that any-extension is lowered identically to zero-extension.
3330 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3331                                                 int64_t ExtTrueVal) const {
3332   SDLoc DL(Op);
3333   MVT VecVT = Op.getSimpleValueType();
3334   SDValue Src = Op.getOperand(0);
3335   // Only custom-lower extensions from mask types
3336   assert(Src.getValueType().isVector() &&
3337          Src.getValueType().getVectorElementType() == MVT::i1);
3338 
3339   MVT XLenVT = Subtarget.getXLenVT();
3340   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3341   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3342 
3343   if (VecVT.isScalableVector()) {
3344     // Be careful not to introduce illegal scalar types at this stage, and be
3345     // careful when splatting constants: on RV32, vXi64 SPLAT_VECTOR is
3346     // illegal and must be expanded. Since we know that the constants are
3347     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3348     bool IsRV32E64 =
3349         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3350 
3351     if (!IsRV32E64) {
3352       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3353       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3354     } else {
3355       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3356       SplatTrueVal =
3357           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3358     }
3359 
3360     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3361   }
3362 
3363   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3364   MVT I1ContainerVT =
3365       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3366 
3367   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3368 
3369   SDValue Mask, VL;
3370   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3371 
3372   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3373   SplatTrueVal =
3374       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3375   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3376                                SplatTrueVal, SplatZero, VL);
3377 
3378   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3379 }
3380 
3381 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3382     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3383   MVT ExtVT = Op.getSimpleValueType();
3384   // Only custom-lower extensions from fixed-length vector types.
3385   if (!ExtVT.isFixedLengthVector())
3386     return Op;
3387   MVT VT = Op.getOperand(0).getSimpleValueType();
3388   // Grab the canonical container type for the extended type. Infer the smaller
3389   // type from that to ensure the same number of vector elements, as we know
3390   // the LMUL will be sufficient to hold the smaller type.
3391   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3392   // Get the extended container type manually to ensure the same number of
3393   // vector elements between source and dest.
3394   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3395                                      ContainerExtVT.getVectorElementCount());
3396 
3397   SDValue Op1 =
3398       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3399 
3400   SDLoc DL(Op);
3401   SDValue Mask, VL;
3402   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3403 
3404   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3405 
3406   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3407 }
3408 
3409 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3410 // setcc operation:
3411 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3412 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3413                                                   SelectionDAG &DAG) const {
3414   SDLoc DL(Op);
3415   EVT MaskVT = Op.getValueType();
3416   // Only expect to custom-lower truncations to mask types
3417   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3418          "Unexpected type for vector mask lowering");
3419   SDValue Src = Op.getOperand(0);
3420   MVT VecVT = Src.getSimpleValueType();
3421 
3422   // If this is a fixed vector, we need to convert it to a scalable vector.
3423   MVT ContainerVT = VecVT;
3424   if (VecVT.isFixedLengthVector()) {
3425     ContainerVT = getContainerForFixedLengthVector(VecVT);
3426     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3427   }
3428 
3429   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3430   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3431 
3432   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3433   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3434 
3435   if (VecVT.isScalableVector()) {
3436     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3437     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3438   }
3439 
3440   SDValue Mask, VL;
3441   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3442 
3443   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3444   SDValue Trunc =
3445       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3446   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3447                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3448   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3449 }
3450 
3451 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3452 // first position of a vector, and that vector is slid up to the insert index.
3453 // By limiting the active vector length to index+1 and merging with the
3454 // original vector (with an undisturbed tail policy for elements >= VL), we
3455 // achieve the desired result of leaving all elements untouched except the one
3456 // at VL-1, which is replaced with the desired value.
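// For example, inserting %v at index 3 of an 8-element vector conceptually
// becomes: move %v into element 0 of a temporary with vmv.s.x (or vfmv.s.f),
// then vslideup by 3 with VL = 4 so that elements 0..2 of the original vector
// are left untouched and element 3 receives %v.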
3457 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3458                                                     SelectionDAG &DAG) const {
3459   SDLoc DL(Op);
3460   MVT VecVT = Op.getSimpleValueType();
3461   SDValue Vec = Op.getOperand(0);
3462   SDValue Val = Op.getOperand(1);
3463   SDValue Idx = Op.getOperand(2);
3464 
3465   if (VecVT.getVectorElementType() == MVT::i1) {
3466     // FIXME: For now we just promote to an i8 vector and insert into that,
3467     // but this is probably not optimal.
3468     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3469     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3470     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3471     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3472   }
3473 
3474   MVT ContainerVT = VecVT;
3475   // If the operand is a fixed-length vector, convert to a scalable one.
3476   if (VecVT.isFixedLengthVector()) {
3477     ContainerVT = getContainerForFixedLengthVector(VecVT);
3478     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3479   }
3480 
3481   MVT XLenVT = Subtarget.getXLenVT();
3482 
3483   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3484   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3485   // Even i64-element vectors on RV32 can be lowered without scalar
3486   // legalization if the most-significant 32 bits of the value are not affected
3487   // by the sign-extension of the lower 32 bits.
3488   // TODO: We could also catch sign extensions of a 32-bit value.
3489   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3490     const auto *CVal = cast<ConstantSDNode>(Val);
3491     if (isInt<32>(CVal->getSExtValue())) {
3492       IsLegalInsert = true;
3493       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3494     }
3495   }
3496 
3497   SDValue Mask, VL;
3498   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3499 
3500   SDValue ValInVec;
3501 
3502   if (IsLegalInsert) {
3503     unsigned Opc =
3504         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3505     if (isNullConstant(Idx)) {
3506       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3507       if (!VecVT.isFixedLengthVector())
3508         return Vec;
3509       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3510     }
3511     ValInVec =
3512         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3513   } else {
3514     // On RV32, i64-element vectors must be specially handled to place the
3515     // value at element 0, by using two vslide1up instructions in sequence on
3516     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3517     // this.
3518     SDValue One = DAG.getConstant(1, DL, XLenVT);
3519     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3520     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3521     MVT I32ContainerVT =
3522         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3523     SDValue I32Mask =
3524         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3525     // Limit the active VL to two.
3526     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3527     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3528     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3529     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3530                            InsertI64VL);
3531     // First slide in the hi value, then the lo in underneath it.
3532     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3533                            ValHi, I32Mask, InsertI64VL);
3534     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3535                            ValLo, I32Mask, InsertI64VL);
3536     // Bitcast back to the right container type.
3537     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3538   }
3539 
3540   // Now that the value is in a vector, slide it into position.
3541   SDValue InsertVL =
3542       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3543   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3544                                 ValInVec, Idx, Mask, InsertVL);
3545   if (!VecVT.isFixedLengthVector())
3546     return Slideup;
3547   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3548 }
3549 
3550 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3551 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3552 // types this is done using VMV_X_S to allow us to glean information about the
3553 // sign bits of the result.
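// For example, extracting element 2 of a v8i32 vector becomes a vslidedown by 2
// with VL = 1 followed by vmv.x.s to move element 0 into a scalar register.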
3554 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3555                                                      SelectionDAG &DAG) const {
3556   SDLoc DL(Op);
3557   SDValue Idx = Op.getOperand(1);
3558   SDValue Vec = Op.getOperand(0);
3559   EVT EltVT = Op.getValueType();
3560   MVT VecVT = Vec.getSimpleValueType();
3561   MVT XLenVT = Subtarget.getXLenVT();
3562 
3563   if (VecVT.getVectorElementType() == MVT::i1) {
3564     // FIXME: For now we just promote to an i8 vector and extract from that,
3565     // but this is probably not optimal.
3566     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3567     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3568     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3569   }
3570 
3571   // If this is a fixed vector, we need to convert it to a scalable vector.
3572   MVT ContainerVT = VecVT;
3573   if (VecVT.isFixedLengthVector()) {
3574     ContainerVT = getContainerForFixedLengthVector(VecVT);
3575     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3576   }
3577 
3578   // If the index is 0, the vector is already in the right position.
3579   if (!isNullConstant(Idx)) {
3580     // Use a VL of 1 to avoid processing more elements than we need.
3581     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3582     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3583     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3584     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3585                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3586   }
3587 
3588   if (!EltVT.isInteger()) {
3589     // Floating-point extracts are handled in TableGen.
3590     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3591                        DAG.getConstant(0, DL, XLenVT));
3592   }
3593 
3594   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3595   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3596 }
3597 
3598 // Some RVV intrinsics may claim that they want an integer operand to be
3599 // promoted or expanded.
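// For example, on RV32 a .vx-style intrinsic taking an i64 scalar operand
// cannot pass the scalar in a single register; unless it is a sign-extended
// 32-bit constant, the scalar is split and re-materialised as a splat vector
// before the intrinsic node is rebuilt with the adjusted operand.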
3600 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3601                                           const RISCVSubtarget &Subtarget) {
3602   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3603           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3604          "Unexpected opcode");
3605 
3606   if (!Subtarget.hasStdExtV())
3607     return SDValue();
3608 
3609   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3610   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3611   SDLoc DL(Op);
3612 
3613   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3614       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3615   if (!II || !II->SplatOperand)
3616     return SDValue();
3617 
3618   unsigned SplatOp = II->SplatOperand + HasChain;
3619   assert(SplatOp < Op.getNumOperands());
3620 
3621   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3622   SDValue &ScalarOp = Operands[SplatOp];
3623   MVT OpVT = ScalarOp.getSimpleValueType();
3624   MVT XLenVT = Subtarget.getXLenVT();
3625 
3626   // If this isn't a scalar, or its type is already XLenVT, we're done.
3627   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3628     return SDValue();
3629 
3630   // Simplest case is that the operand needs to be promoted to XLenVT.
3631   if (OpVT.bitsLT(XLenVT)) {
3632     // If the operand is a constant, sign extend to increase our chances
3633     // of being able to use a .vi instruction. ANY_EXTEND would become a
3634     // zero extend and the simm5 check in isel would fail.
3635     // FIXME: Should we ignore the upper bits in isel instead?
3636     unsigned ExtOpc =
3637         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3638     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3639     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3640   }
3641 
3642   // Use the previous operand to get the vXi64 VT. The result might be a mask
3643   // VT for compares. Using the previous operand assumes that the previous
3644   // operand will never have a smaller element size than a scalar operand and
3645   // that a widening operation never uses SEW=64.
3646     // NOTE: If the assert below fails, we can probably just find the
3647     // element count from any operand or result and use it to construct the VT.
3648   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3649   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3650 
3651   // The more complex case is when the scalar is larger than XLenVT.
3652   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3653          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3654 
3655   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3656   // on the instruction to sign-extend since SEW>XLEN.
3657   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3658     if (isInt<32>(CVal->getSExtValue())) {
3659       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3660       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3661     }
3662   }
3663 
3664   // We need to convert the scalar to a splat vector.
3665   // FIXME: Can we implicitly truncate the scalar if it is known to
3666   // be sign extended?
3667   // VL should be the last operand.
3668   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3669   assert(VL.getValueType() == XLenVT);
3670   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3671   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3672 }
3673 
3674 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3675                                                      SelectionDAG &DAG) const {
3676   unsigned IntNo = Op.getConstantOperandVal(0);
3677   SDLoc DL(Op);
3678   MVT XLenVT = Subtarget.getXLenVT();
3679 
3680   switch (IntNo) {
3681   default:
3682     break; // Don't custom lower most intrinsics.
3683   case Intrinsic::thread_pointer: {
3684     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3685     return DAG.getRegister(RISCV::X4, PtrVT);
3686   }
3687   case Intrinsic::riscv_orc_b:
3688     // Lower to the GORCI encoding for orc.b.
3689     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3690                        DAG.getConstant(7, DL, XLenVT));
3691   case Intrinsic::riscv_grev:
3692   case Intrinsic::riscv_gorc: {
3693     unsigned Opc =
3694         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3695     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3696   }
3697   case Intrinsic::riscv_shfl:
3698   case Intrinsic::riscv_unshfl: {
3699     unsigned Opc =
3700         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3701     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3702   }
3703   case Intrinsic::riscv_bcompress:
3704   case Intrinsic::riscv_bdecompress: {
3705     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3706                                                        : RISCVISD::BDECOMPRESS;
3707     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3708   }
3709   case Intrinsic::riscv_vmv_x_s:
3710     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3711     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3712                        Op.getOperand(1));
3713   case Intrinsic::riscv_vmv_v_x:
3714     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3715                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3716   case Intrinsic::riscv_vfmv_v_f:
3717     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3718                        Op.getOperand(1), Op.getOperand(2));
3719   case Intrinsic::riscv_vmv_s_x: {
3720     SDValue Scalar = Op.getOperand(2);
3721 
3722     if (Scalar.getValueType().bitsLE(XLenVT)) {
3723       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3724       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3725                          Op.getOperand(1), Scalar, Op.getOperand(3));
3726     }
3727 
3728     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3729 
3730     // This is an i64 value that lives in two scalar registers. We have to
3731     // insert this in a convoluted way. First we build a vXi64 splat containing
3732     // the two values that we assemble using some bit math. Next we'll use
3733     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3734     // to merge element 0 from our splat into the source vector.
3735     // FIXME: This is probably not the best way to do this, but it is
3736     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3737     // point.
3738     //   sw lo, (a0)
3739     //   sw hi, 4(a0)
3740     //   vlse vX, (a0)
3741     //
3742     //   vid.v      vVid
3743     //   vmseq.vx   mMask, vVid, 0
3744     //   vmerge.vvm vDest, vSrc, vVal, mMask
3745     MVT VT = Op.getSimpleValueType();
3746     SDValue Vec = Op.getOperand(1);
3747     SDValue VL = Op.getOperand(3);
3748 
3749     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3750     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3751                                       DAG.getConstant(0, DL, MVT::i32), VL);
3752 
3753     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3754     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3755     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3756     SDValue SelectCond =
3757         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3758                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3759     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3760                        Vec, VL);
3761   }
3762   case Intrinsic::riscv_vslide1up:
3763   case Intrinsic::riscv_vslide1down:
3764   case Intrinsic::riscv_vslide1up_mask:
3765   case Intrinsic::riscv_vslide1down_mask: {
3766     // We need to special case these when the scalar is larger than XLen.
3767     unsigned NumOps = Op.getNumOperands();
3768     bool IsMasked = NumOps == 6;
3769     unsigned OpOffset = IsMasked ? 1 : 0;
3770     SDValue Scalar = Op.getOperand(2 + OpOffset);
3771     if (Scalar.getValueType().bitsLE(XLenVT))
3772       break;
3773 
3774     // Splatting a sign extended constant is fine.
3775     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3776       if (isInt<32>(CVal->getSExtValue()))
3777         break;
3778 
3779     MVT VT = Op.getSimpleValueType();
3780     assert(VT.getVectorElementType() == MVT::i64 &&
3781            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3782 
3783     // Convert the vector source to the equivalent nxvXi32 vector.
3784     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3785     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3786 
3787     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3788                                    DAG.getConstant(0, DL, XLenVT));
3789     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3790                                    DAG.getConstant(1, DL, XLenVT));
3791 
3792     // Double the VL since we halved SEW.
3793     SDValue VL = Op.getOperand(NumOps - 1);
3794     SDValue I32VL =
3795         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3796 
3797     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3798     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3799 
3800     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3801     // instructions.
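    // Note the operand order: for slide1up the high half goes in first so that
    // after the second slide element 0 holds {lo, hi} in its two i32 lanes,
    // i.e. the original i64 value; slide1down mirrors this by pushing the low
    // half in first.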
3802     if (IntNo == Intrinsic::riscv_vslide1up ||
3803         IntNo == Intrinsic::riscv_vslide1up_mask) {
3804       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3805                         I32Mask, I32VL);
3806       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3807                         I32Mask, I32VL);
3808     } else {
3809       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3810                         I32Mask, I32VL);
3811       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3812                         I32Mask, I32VL);
3813     }
3814 
3815     // Convert back to nxvXi64.
3816     Vec = DAG.getBitcast(VT, Vec);
3817 
3818     if (!IsMasked)
3819       return Vec;
3820 
3821     // Apply mask after the operation.
3822     SDValue Mask = Op.getOperand(NumOps - 2);
3823     SDValue MaskedOff = Op.getOperand(1);
3824     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3825   }
3826   }
3827 
3828   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3829 }
3830 
3831 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3832                                                     SelectionDAG &DAG) const {
3833   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3834 }
3835 
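// Return the scalable container type corresponding to a single vector
// register (LMUL=1) with the same element type as VT; for instance, with
// RVVBitsPerBlock == 64, i32 elements give nxv2i32 and i8 elements give
// nxv8i8.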
3836 static MVT getLMUL1VT(MVT VT) {
3837   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3838          "Unexpected vector MVT");
3839   return MVT::getScalableVectorVT(
3840       VT.getVectorElementType(),
3841       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3842 }
3843 
3844 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3845   switch (ISDOpcode) {
3846   default:
3847     llvm_unreachable("Unhandled reduction");
3848   case ISD::VECREDUCE_ADD:
3849     return RISCVISD::VECREDUCE_ADD_VL;
3850   case ISD::VECREDUCE_UMAX:
3851     return RISCVISD::VECREDUCE_UMAX_VL;
3852   case ISD::VECREDUCE_SMAX:
3853     return RISCVISD::VECREDUCE_SMAX_VL;
3854   case ISD::VECREDUCE_UMIN:
3855     return RISCVISD::VECREDUCE_UMIN_VL;
3856   case ISD::VECREDUCE_SMIN:
3857     return RISCVISD::VECREDUCE_SMIN_VL;
3858   case ISD::VECREDUCE_AND:
3859     return RISCVISD::VECREDUCE_AND_VL;
3860   case ISD::VECREDUCE_OR:
3861     return RISCVISD::VECREDUCE_OR_VL;
3862   case ISD::VECREDUCE_XOR:
3863     return RISCVISD::VECREDUCE_XOR_VL;
3864   }
3865 }
3866 
3867 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3868                                                       SelectionDAG &DAG) const {
3869   SDLoc DL(Op);
3870   SDValue Vec = Op.getOperand(0);
3871   MVT VecVT = Vec.getSimpleValueType();
3872   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3873           Op.getOpcode() == ISD::VECREDUCE_OR ||
3874           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3875          "Unexpected reduction lowering");
3876 
3877   MVT XLenVT = Subtarget.getXLenVT();
3878   assert(Op.getValueType() == XLenVT &&
3879          "Expected reduction output to be legalized to XLenVT");
3880 
3881   MVT ContainerVT = VecVT;
3882   if (VecVT.isFixedLengthVector()) {
3883     ContainerVT = getContainerForFixedLengthVector(VecVT);
3884     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3885   }
3886 
3887   SDValue Mask, VL;
3888   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3889   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3890 
3891   switch (Op.getOpcode()) {
3892   default:
3893     llvm_unreachable("Unhandled reduction");
3894   case ISD::VECREDUCE_AND:
3895     // vpopc ~x == 0
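    // (An AND reduction of a mask is true iff every active lane is set, i.e.
    // the population count of the complemented mask is zero.)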
3896     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3897     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3898     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3899   case ISD::VECREDUCE_OR:
3900     // vpopc x != 0
3901     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3902     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3903   case ISD::VECREDUCE_XOR: {
3904     // ((vpopc x) & 1) != 0
3905     SDValue One = DAG.getConstant(1, DL, XLenVT);
3906     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3907     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3908     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3909   }
3910   }
3911 }
3912 
3913 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3914                                             SelectionDAG &DAG) const {
3915   SDLoc DL(Op);
3916   SDValue Vec = Op.getOperand(0);
3917   EVT VecEVT = Vec.getValueType();
3918 
3919   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3920 
3921   // Due to ordering in legalize types we may have a vector type that needs to
3922   // be split. Do that manually so we can get down to a legal type.
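  // Combining the halves with the scalar base opcode (e.g. ADD for
  // VECREDUCE_ADD) is safe since all integer reductions handled here are
  // associative and commutative, so reducing the element-wise combination of
  // the halves is equivalent to reducing the original vector.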
3923   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3924          TargetLowering::TypeSplitVector) {
3925     SDValue Lo, Hi;
3926     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3927     VecEVT = Lo.getValueType();
3928     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3929   }
3930 
3931   // TODO: The type may need to be widened rather than split. Or widened before
3932   // it can be split.
3933   if (!isTypeLegal(VecEVT))
3934     return SDValue();
3935 
3936   MVT VecVT = VecEVT.getSimpleVT();
3937   MVT VecEltVT = VecVT.getVectorElementType();
3938   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3939 
3940   MVT ContainerVT = VecVT;
3941   if (VecVT.isFixedLengthVector()) {
3942     ContainerVT = getContainerForFixedLengthVector(VecVT);
3943     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3944   }
3945 
3946   MVT M1VT = getLMUL1VT(ContainerVT);
3947 
3948   SDValue Mask, VL;
3949   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3950 
3951   // FIXME: This is a VLMAX splat which might be too large and can prevent
3952   // vsetvli removal.
3953   SDValue NeutralElem =
3954       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3955   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3956   SDValue Reduction =
3957       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3958   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3959                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3960   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3961 }
3962 
3963 // Given a reduction op, this function returns the matching reduction opcode,
3964 // the vector SDValue and the scalar SDValue required to lower this to a
3965 // RISCVISD node.
3966 static std::tuple<unsigned, SDValue, SDValue>
3967 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3968   SDLoc DL(Op);
3969   auto Flags = Op->getFlags();
3970   unsigned Opcode = Op.getOpcode();
3971   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3972   switch (Opcode) {
3973   default:
3974     llvm_unreachable("Unhandled reduction");
3975   case ISD::VECREDUCE_FADD:
3976     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3977                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3978   case ISD::VECREDUCE_SEQ_FADD:
3979     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3980                            Op.getOperand(0));
3981   case ISD::VECREDUCE_FMIN:
3982     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3983                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3984   case ISD::VECREDUCE_FMAX:
3985     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3986                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3987   }
3988 }
3989 
3990 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3991                                               SelectionDAG &DAG) const {
3992   SDLoc DL(Op);
3993   MVT VecEltVT = Op.getSimpleValueType();
3994 
3995   unsigned RVVOpcode;
3996   SDValue VectorVal, ScalarVal;
3997   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3998       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3999   MVT VecVT = VectorVal.getSimpleValueType();
4000 
4001   MVT ContainerVT = VecVT;
4002   if (VecVT.isFixedLengthVector()) {
4003     ContainerVT = getContainerForFixedLengthVector(VecVT);
4004     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4005   }
4006 
4007   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4008 
4009   SDValue Mask, VL;
4010   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4011 
4012   // FIXME: This is a VLMAX splat which might be too large and can prevent
4013   // vsetvli removal.
4014   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
4015   SDValue Reduction =
4016       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
4017   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4018                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4019 }
4020 
4021 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4022                                                    SelectionDAG &DAG) const {
4023   SDValue Vec = Op.getOperand(0);
4024   SDValue SubVec = Op.getOperand(1);
4025   MVT VecVT = Vec.getSimpleValueType();
4026   MVT SubVecVT = SubVec.getSimpleValueType();
4027 
4028   SDLoc DL(Op);
4029   MVT XLenVT = Subtarget.getXLenVT();
4030   unsigned OrigIdx = Op.getConstantOperandVal(2);
4031   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4032 
4033   // We don't have the ability to slide mask vectors up indexed by their i1
4034   // elements; the smallest we can do is i8. Often we are able to bitcast to
4035   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4036   // into a scalable one, we might not necessarily have enough scalable
4037   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
4038   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4039       (OrigIdx != 0 || !Vec.isUndef())) {
4040     if (VecVT.getVectorMinNumElements() >= 8 &&
4041         SubVecVT.getVectorMinNumElements() >= 8) {
4042       assert(OrigIdx % 8 == 0 && "Invalid index");
4043       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4044              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4045              "Unexpected mask vector lowering");
4046       OrigIdx /= 8;
4047       SubVecVT =
4048           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4049                            SubVecVT.isScalableVector());
4050       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4051                                VecVT.isScalableVector());
4052       Vec = DAG.getBitcast(VecVT, Vec);
4053       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4054     } else {
4055       // We can't slide this mask vector up indexed by its i1 elements.
4056       // This poses a problem when we wish to insert a scalable vector which
4057       // can't be re-expressed as a larger type. Just choose the slow path and
4058       // extend to a larger type, then truncate back down.
4059       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4060       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4061       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4062       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4063       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4064                         Op.getOperand(2));
4065       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4066       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4067     }
4068   }
4069 
4070   // If the subvector is a fixed-length type, we cannot use subregister
4071   // manipulation to simplify the codegen; we don't know which register of a
4072   // LMUL group contains the specific subvector as we only know the minimum
4073   // register size. Therefore we must slide the vector group up the full
4074   // amount.
4075   if (SubVecVT.isFixedLengthVector()) {
4076     if (OrigIdx == 0 && Vec.isUndef())
4077       return Op;
4078     MVT ContainerVT = VecVT;
4079     if (VecVT.isFixedLengthVector()) {
4080       ContainerVT = getContainerForFixedLengthVector(VecVT);
4081       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4082     }
4083     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4084                          DAG.getUNDEF(ContainerVT), SubVec,
4085                          DAG.getConstant(0, DL, XLenVT));
4086     SDValue Mask =
4087         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4088     // Set the vector length to only the number of elements we care about. Note
4089     // that for slideup this includes the offset.
4090     SDValue VL =
4091         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4092     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4093     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4094                                   SubVec, SlideupAmt, Mask, VL);
4095     if (VecVT.isFixedLengthVector())
4096       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4097     return DAG.getBitcast(Op.getValueType(), Slideup);
4098   }
4099 
4100   unsigned SubRegIdx, RemIdx;
4101   std::tie(SubRegIdx, RemIdx) =
4102       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4103           VecVT, SubVecVT, OrigIdx, TRI);
4104 
4105   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4106   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4107                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4108                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4109 
4110   // 1. If the Idx has been completely eliminated and this subvector's size is
4111   // a vector register or a multiple thereof, or the surrounding elements are
4112   // undef, then this is a subvector insert which naturally aligns to a vector
4113   // register. These can easily be handled using subregister manipulation.
4114   // 2. If the subvector is smaller than a vector register, then the insertion
4115   // must preserve the undisturbed elements of the register. We do this by
4116   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4117   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4118   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4119   // LMUL=1 type back into the larger vector (resolving to another subregister
4120   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4121   // to avoid allocating a large register group to hold our subvector.
4122   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4123     return Op;
4124 
4125   // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
4126   // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
4127   // (in our case undisturbed). This means we can set up a subvector insertion
4128   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4129   // size of the subvector.
4130   MVT InterSubVT = VecVT;
4131   SDValue AlignedExtract = Vec;
4132   unsigned AlignedIdx = OrigIdx - RemIdx;
4133   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4134     InterSubVT = getLMUL1VT(VecVT);
4135     // Extract a subvector equal to the nearest full vector register type. This
4136     // should resolve to an EXTRACT_SUBREG instruction.
4137     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4138                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4139   }
4140 
4141   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4142   // For scalable vectors this must be further multiplied by vscale.
4143   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4144 
4145   SDValue Mask, VL;
4146   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4147 
4148   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4149   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4150   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4151   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4152 
4153   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4154                        DAG.getUNDEF(InterSubVT), SubVec,
4155                        DAG.getConstant(0, DL, XLenVT));
4156 
4157   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4158                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4159 
4160   // If required, insert this subvector back into the correct vector register.
4161   // This should resolve to an INSERT_SUBREG instruction.
4162   if (VecVT.bitsGT(InterSubVT))
4163     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4164                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4165 
4166   // We might have bitcast from a mask type: cast back to the original type if
4167   // required.
4168   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4169 }
4170 
4171 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4172                                                     SelectionDAG &DAG) const {
4173   SDValue Vec = Op.getOperand(0);
4174   MVT SubVecVT = Op.getSimpleValueType();
4175   MVT VecVT = Vec.getSimpleValueType();
4176 
4177   SDLoc DL(Op);
4178   MVT XLenVT = Subtarget.getXLenVT();
4179   unsigned OrigIdx = Op.getConstantOperandVal(1);
4180   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4181 
4182   // We don't have the ability to slide mask vectors down indexed by their i1
4183   // elements; the smallest we can do is i8. Often we are able to bitcast to
4184   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4185   // from a scalable one, we might not necessarily have enough scalable
4186   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4187   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4188     if (VecVT.getVectorMinNumElements() >= 8 &&
4189         SubVecVT.getVectorMinNumElements() >= 8) {
4190       assert(OrigIdx % 8 == 0 && "Invalid index");
4191       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4192              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4193              "Unexpected mask vector lowering");
4194       OrigIdx /= 8;
4195       SubVecVT =
4196           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4197                            SubVecVT.isScalableVector());
4198       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4199                                VecVT.isScalableVector());
4200       Vec = DAG.getBitcast(VecVT, Vec);
4201     } else {
4202       // We can't slide this mask vector down indexed by its i1 elements.
4203       // This poses a problem when we wish to extract a scalable vector which
4204       // can't be re-expressed as a larger type. Just choose the slow path and
4205       // extend to a larger type, then truncate back down.
4206       // TODO: We could probably improve this when extracting certain
4207       // fixed-length vectors from fixed-length vectors, where we can extract
4208       // as i8 and shift the correct element right to reach the subvector.
4209       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4210       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4211       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4212       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4213                         Op.getOperand(1));
4214       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4215       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4216     }
4217   }
4218 
4219   // If the subvector is a fixed-length type, we cannot use subregister
4220   // manipulation to simplify the codegen; we don't know which register of a
4221   // LMUL group contains the specific subvector as we only know the minimum
4222   // register size. Therefore we must slide the vector group down the full
4223   // amount.
4224   if (SubVecVT.isFixedLengthVector()) {
4225     // With an index of 0 this is a cast-like subvector, which can be performed
4226     // with subregister operations.
4227     if (OrigIdx == 0)
4228       return Op;
4229     MVT ContainerVT = VecVT;
4230     if (VecVT.isFixedLengthVector()) {
4231       ContainerVT = getContainerForFixedLengthVector(VecVT);
4232       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4233     }
4234     SDValue Mask =
4235         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4236     // Set the vector length to only the number of elements we care about. This
4237     // avoids sliding down elements we're going to discard straight away.
4238     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4239     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4240     SDValue Slidedown =
4241         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4242                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4243     // Now we can use a cast-like subvector extract to get the result.
4244     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4245                             DAG.getConstant(0, DL, XLenVT));
4246     return DAG.getBitcast(Op.getValueType(), Slidedown);
4247   }
4248 
4249   unsigned SubRegIdx, RemIdx;
4250   std::tie(SubRegIdx, RemIdx) =
4251       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4252           VecVT, SubVecVT, OrigIdx, TRI);
4253 
4254   // If the Idx has been completely eliminated then this is a subvector extract
4255   // which naturally aligns to a vector register. These can easily be handled
4256   // using subregister manipulation.
4257   if (RemIdx == 0)
4258     return Op;
4259 
4260   // Else we must shift our vector register directly to extract the subvector.
4261   // Do this using VSLIDEDOWN.
4262 
4263   // If the vector type is an LMUL-group type, extract a subvector equal to the
4264   // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
4265   // instruction.
4266   MVT InterSubVT = VecVT;
4267   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4268     InterSubVT = getLMUL1VT(VecVT);
4269     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4270                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4271   }
4272 
4273   // Slide this vector register down by the desired number of elements in order
4274   // to place the desired subvector starting at element 0.
4275   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4276   // For scalable vectors this must be further multiplied by vscale.
4277   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4278 
4279   SDValue Mask, VL;
4280   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4281   SDValue Slidedown =
4282       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4283                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4284 
4285   // Now the vector is in the right position, extract our final subvector. This
4286   // should resolve to a COPY.
4287   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4288                           DAG.getConstant(0, DL, XLenVT));
4289 
4290   // We might have bitcast from a mask type: cast back to the original type if
4291   // required.
4292   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4293 }
4294 
4295 // Lower step_vector to the vid instruction. Any non-identity step value must
4296 // be accounted for by manual expansion.
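// For example, a constant step of 4 lowers to vid.v followed by a vector shift
// left by log2(4) = 2, while a non-power-of-two step such as 6 lowers to vid.v
// followed by a vector multiply by a splat of 6.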
4297 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4298                                               SelectionDAG &DAG) const {
4299   SDLoc DL(Op);
4300   MVT VT = Op.getSimpleValueType();
4301   MVT XLenVT = Subtarget.getXLenVT();
4302   SDValue Mask, VL;
4303   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4304   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4305   uint64_t StepValImm = Op.getConstantOperandVal(0);
4306   if (StepValImm != 1) {
4307     if (isPowerOf2_64(StepValImm)) {
4308       SDValue StepVal =
4309           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4310                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4311       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4312     } else {
4313       SDValue StepVal = lowerScalarSplat(
4314           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4315           DL, DAG, Subtarget);
4316       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4317     }
4318   }
4319   return StepVec;
4320 }
4321 
4322 // Implement vector_reverse using vrgather.vv with indices determined by
4323 // subtracting the id of each element from (VLMAX-1). This will convert
4324 // the indices like so:
4325 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4326 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
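// For instance, with VLMAX=8 the gather indices are (7, 6, ..., 1, 0), so
// element i of the result reads element VLMAX-1-i of the source.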
4327 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4328                                                  SelectionDAG &DAG) const {
4329   SDLoc DL(Op);
4330   MVT VecVT = Op.getSimpleValueType();
4331   unsigned EltSize = VecVT.getScalarSizeInBits();
4332   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4333 
4334   unsigned MaxVLMAX = 0;
4335   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4336   if (VectorBitsMax != 0)
4337     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4338 
4339   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4340   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4341 
4342   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4343   // to use vrgatherei16.vv.
4344   // TODO: It's also possible to use vrgatherei16.vv for other types to
4345   // decrease register width for the index calculation.
4346   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
4347     // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4348     // Reverse each half, then reassemble them in reverse order.
4349     // NOTE: It's also possible that after splitting that VLMAX no longer
4350     // requires vrgatherei16.vv.
4351     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4352       SDValue Lo, Hi;
4353       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4354       EVT LoVT, HiVT;
4355       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4356       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4357       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4358       // Reassemble the low and high pieces reversed.
4359       // FIXME: This is a CONCAT_VECTORS.
4360       SDValue Res =
4361           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4362                       DAG.getIntPtrConstant(0, DL));
4363       return DAG.getNode(
4364           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4365           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4366     }
4367 
4368     // Just promote the int type to i16 which will double the LMUL.
4369     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4370     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4371   }
4372 
4373   MVT XLenVT = Subtarget.getXLenVT();
4374   SDValue Mask, VL;
4375   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4376 
4377   // Calculate VLMAX-1 for the desired SEW.
4378   unsigned MinElts = VecVT.getVectorMinNumElements();
4379   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4380                               DAG.getConstant(MinElts, DL, XLenVT));
4381   SDValue VLMinus1 =
4382       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4383 
4384   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4385   bool IsRV32E64 =
4386       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4387   SDValue SplatVL;
4388   if (!IsRV32E64)
4389     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4390   else
4391     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4392 
4393   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4394   SDValue Indices =
4395       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4396 
4397   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4398 }
4399 
4400 SDValue
4401 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4402                                                      SelectionDAG &DAG) const {
4403   SDLoc DL(Op);
4404   auto *Load = cast<LoadSDNode>(Op);
4405 
4406   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4407                                         Load->getMemoryVT(),
4408                                         *Load->getMemOperand()) &&
4409          "Expecting a correctly-aligned load");
4410 
4411   MVT VT = Op.getSimpleValueType();
4412   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4413 
4414   SDValue VL =
4415       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4416 
4417   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4418   SDValue NewLoad = DAG.getMemIntrinsicNode(
4419       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4420       Load->getMemoryVT(), Load->getMemOperand());
4421 
4422   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4423   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4424 }
4425 
4426 SDValue
4427 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4428                                                       SelectionDAG &DAG) const {
4429   SDLoc DL(Op);
4430   auto *Store = cast<StoreSDNode>(Op);
4431 
4432   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4433                                         Store->getMemoryVT(),
4434                                         *Store->getMemOperand()) &&
4435          "Expecting a correctly-aligned store");
4436 
4437   SDValue StoreVal = Store->getValue();
4438   MVT VT = StoreVal.getSimpleValueType();
4439 
4440   // If the size is less than a byte, we need to pad with zeros to make a byte.
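  // For example, a v4i1 store value is widened to v8i1 by inserting it into a
  // zero vector at element 0, so that a full byte is written to memory.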
4441   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4442     VT = MVT::v8i1;
4443     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4444                            DAG.getConstant(0, DL, VT), StoreVal,
4445                            DAG.getIntPtrConstant(0, DL));
4446   }
4447 
4448   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4449 
4450   SDValue VL =
4451       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4452 
4453   SDValue NewValue =
4454       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4455   return DAG.getMemIntrinsicNode(
4456       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4457       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4458       Store->getMemoryVT(), Store->getMemOperand());
4459 }
4460 
4461 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
4462                                              SelectionDAG &DAG) const {
4463   SDLoc DL(Op);
4464   MVT VT = Op.getSimpleValueType();
4465 
4466   const auto *MemSD = cast<MemSDNode>(Op);
4467   EVT MemVT = MemSD->getMemoryVT();
4468   MachineMemOperand *MMO = MemSD->getMemOperand();
4469   SDValue Chain = MemSD->getChain();
4470   SDValue BasePtr = MemSD->getBasePtr();
4471 
4472   SDValue Mask, PassThru, VL;
4473   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
4474     Mask = VPLoad->getMask();
4475     PassThru = DAG.getUNDEF(VT);
4476     VL = VPLoad->getVectorLength();
4477   } else {
4478     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
4479     Mask = MLoad->getMask();
4480     PassThru = MLoad->getPassThru();
4481   }
4482 
4483   MVT XLenVT = Subtarget.getXLenVT();
4484 
4485   MVT ContainerVT = VT;
4486   if (VT.isFixedLengthVector()) {
4487     ContainerVT = getContainerForFixedLengthVector(VT);
4488     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4489 
4490     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4491     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4492   }
4493 
4494   if (!VL)
4495     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4496 
4497   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4498   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4499   SDValue Ops[] = {Chain, IntID, PassThru, BasePtr, Mask, VL};
4500   SDValue Result =
4501       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
4502   Chain = Result.getValue(1);
4503 
4504   if (VT.isFixedLengthVector())
4505     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4506 
4507   return DAG.getMergeValues({Result, Chain}, DL);
4508 }
4509 
4510 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
4511                                               SelectionDAG &DAG) const {
4512   SDLoc DL(Op);
4513 
4514   const auto *MemSD = cast<MemSDNode>(Op);
4515   EVT MemVT = MemSD->getMemoryVT();
4516   MachineMemOperand *MMO = MemSD->getMemOperand();
4517   SDValue Chain = MemSD->getChain();
4518   SDValue BasePtr = MemSD->getBasePtr();
4519   SDValue Val, Mask, VL;
4520 
4521   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
4522     Val = VPStore->getValue();
4523     Mask = VPStore->getMask();
4524     VL = VPStore->getVectorLength();
4525   } else {
4526     const auto *MStore = cast<MaskedStoreSDNode>(Op);
4527     Val = MStore->getValue();
4528     Mask = MStore->getMask();
4529   }
4530 
4531   MVT VT = Val.getSimpleValueType();
4532   MVT XLenVT = Subtarget.getXLenVT();
4533 
4534   MVT ContainerVT = VT;
4535   if (VT.isFixedLengthVector()) {
4536     ContainerVT = getContainerForFixedLengthVector(VT);
4537     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4538 
4539     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4540     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4541   }
4542 
4543   if (!VL)
4544     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4545 
4546   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4547   return DAG.getMemIntrinsicNode(
4548       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4549       {Chain, IntID, Val, BasePtr, Mask, VL}, MemVT, MMO);
4550 }
4551 
4552 SDValue
4553 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4554                                                       SelectionDAG &DAG) const {
4555   MVT InVT = Op.getOperand(0).getSimpleValueType();
4556   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4557 
4558   MVT VT = Op.getSimpleValueType();
4559 
4560   SDValue Op1 =
4561       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4562   SDValue Op2 =
4563       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4564 
4565   SDLoc DL(Op);
4566   SDValue VL =
4567       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4568 
4569   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4570   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4571 
4572   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4573                             Op.getOperand(2), Mask, VL);
4574 
4575   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4576 }
4577 
4578 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4579     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4580   MVT VT = Op.getSimpleValueType();
4581 
4582   if (VT.getVectorElementType() == MVT::i1)
4583     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4584 
4585   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4586 }
4587 
4588 SDValue
4589 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4590                                                       SelectionDAG &DAG) const {
4591   unsigned Opc;
4592   switch (Op.getOpcode()) {
4593   default: llvm_unreachable("Unexpected opcode!");
4594   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4595   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4596   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4597   }
4598 
4599   return lowerToScalableOp(Op, DAG, Opc);
4600 }
4601 
4602 // Lower vector ABS to smax(X, sub(0, X)).
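// Note that for the most negative element value both sub(0, X) and the smax
// wrap back to INT_MIN, matching the usual two's-complement expansion of ABS.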
4603 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4604   SDLoc DL(Op);
4605   MVT VT = Op.getSimpleValueType();
4606   SDValue X = Op.getOperand(0);
4607 
4608   assert(VT.isFixedLengthVector() && "Unexpected type");
4609 
4610   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4611   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4612 
4613   SDValue Mask, VL;
4614   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4615 
4616   SDValue SplatZero =
4617       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4618                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4619   SDValue NegX =
4620       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4621   SDValue Max =
4622       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4623 
4624   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4625 }
4626 
4627 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4628     SDValue Op, SelectionDAG &DAG) const {
4629   SDLoc DL(Op);
4630   MVT VT = Op.getSimpleValueType();
4631   SDValue Mag = Op.getOperand(0);
4632   SDValue Sign = Op.getOperand(1);
4633   assert(Mag.getValueType() == Sign.getValueType() &&
4634          "Can only handle COPYSIGN with matching types.");
4635 
4636   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4637   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4638   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4639 
4640   SDValue Mask, VL;
4641   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4642 
4643   SDValue CopySign =
4644       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4645 
4646   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4647 }
4648 
4649 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4650     SDValue Op, SelectionDAG &DAG) const {
4651   MVT VT = Op.getSimpleValueType();
4652   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4653 
4654   MVT I1ContainerVT =
4655       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4656 
4657   SDValue CC =
4658       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4659   SDValue Op1 =
4660       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4661   SDValue Op2 =
4662       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4663 
4664   SDLoc DL(Op);
4665   SDValue Mask, VL;
4666   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4667 
4668   SDValue Select =
4669       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4670 
4671   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4672 }
4673 
4674 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4675                                                unsigned NewOpc,
4676                                                bool HasMask) const {
4677   MVT VT = Op.getSimpleValueType();
4678   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4679 
4680   // Create list of operands by converting existing ones to scalable types.
4681   SmallVector<SDValue, 6> Ops;
4682   for (const SDValue &V : Op->op_values()) {
4683     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4684 
4685     // Pass through non-vector operands.
4686     if (!V.getValueType().isVector()) {
4687       Ops.push_back(V);
4688       continue;
4689     }
4690 
4691     // "cast" fixed length vector to a scalable vector.
4692     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4693            "Only fixed length vectors are supported!");
4694     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4695   }
4696 
4697   SDLoc DL(Op);
4698   SDValue Mask, VL;
4699   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4700   if (HasMask)
4701     Ops.push_back(Mask);
4702   Ops.push_back(VL);
4703 
4704   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4705   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4706 }
4707 
4708 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4709 // * Operands of each node are assumed to be in the same order.
4710 // * The EVL operand is promoted from i32 to i64 on RV64.
4711 // * Fixed-length vectors are converted to their scalable-vector container
4712 //   types.
4713 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4714                                        unsigned RISCVISDOpc) const {
4715   SDLoc DL(Op);
4716   MVT VT = Op.getSimpleValueType();
4717   SmallVector<SDValue, 4> Ops;
4718 
4719   for (const auto &OpIdx : enumerate(Op->ops())) {
4720     SDValue V = OpIdx.value();
4721     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4722     // Pass through operands which aren't fixed-length vectors.
4723     if (!V.getValueType().isFixedLengthVector()) {
4724       Ops.push_back(V);
4725       continue;
4726     }
4727     // "cast" fixed length vector to a scalable vector.
4728     MVT OpVT = V.getSimpleValueType();
4729     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4730     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4731            "Only fixed length vectors are supported!");
4732     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4733   }
4734 
4735   if (!VT.isFixedLengthVector())
4736     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4737 
4738   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4739 
4740   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4741 
4742   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4743 }
4744 
4745 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
4746 // matched to a RVV indexed load. The RVV indexed load instructions only
4747 // support the "unsigned unscaled" addressing mode; indices are implicitly
4748 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
4749 // signed or scaled indexing is extended to the XLEN value type and scaled
4750 // accordingly.
4751 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
4752                                                SelectionDAG &DAG) const {
4753   SDLoc DL(Op);
4754   MVT VT = Op.getSimpleValueType();
4755 
4756   const auto *MemSD = cast<MemSDNode>(Op.getNode());
4757   EVT MemVT = MemSD->getMemoryVT();
4758   MachineMemOperand *MMO = MemSD->getMemOperand();
4759   SDValue Chain = MemSD->getChain();
4760   SDValue BasePtr = MemSD->getBasePtr();
4761 
4762   ISD::LoadExtType LoadExtType;
4763   SDValue Index, Mask, PassThru, VL;
4764 
4765   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
4766     Index = VPGN->getIndex();
4767     Mask = VPGN->getMask();
4768     PassThru = DAG.getUNDEF(VT);
4769     VL = VPGN->getVectorLength();
4770     // VP doesn't support extending loads.
4771     LoadExtType = ISD::NON_EXTLOAD;
4772   } else {
4773     // Else it must be a MGATHER.
4774     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4775     Index = MGN->getIndex();
4776     Mask = MGN->getMask();
4777     PassThru = MGN->getPassThru();
4778     LoadExtType = MGN->getExtensionType();
4779   }
4780 
4781   MVT IndexVT = Index.getSimpleValueType();
4782   MVT XLenVT = Subtarget.getXLenVT();
4783 
4784   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4785          "Unexpected VTs!");
4786   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
4787   // Targets have to explicitly opt-in for extending vector loads.
4788   assert(LoadExtType == ISD::NON_EXTLOAD &&
4789          "Unexpected extending MGATHER/VP_GATHER");
4790   (void)LoadExtType;
4791 
4792   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4793   // the selection of the masked intrinsics doesn't do this for us.
4794   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4795 
4796   MVT ContainerVT = VT;
4797   if (VT.isFixedLengthVector()) {
4798     // We need to use the larger of the result and index type to determine the
4799     // scalable type to use so we don't increase LMUL for any operand/result.
4800     if (VT.bitsGE(IndexVT)) {
4801       ContainerVT = getContainerForFixedLengthVector(VT);
4802       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4803                                  ContainerVT.getVectorElementCount());
4804     } else {
4805       IndexVT = getContainerForFixedLengthVector(IndexVT);
4806       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4807                                      IndexVT.getVectorElementCount());
4808     }
4809 
4810     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4811 
4812     if (!IsUnmasked) {
4813       MVT MaskVT =
4814           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4815       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4816       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4817     }
4818   }
4819 
4820   if (!VL)
4821     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4822 
4823   unsigned IntID =
4824       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
4825   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
4826   if (!IsUnmasked)
4827     Ops.push_back(PassThru);
4828   Ops.push_back(BasePtr);
4829   Ops.push_back(Index);
4830   if (!IsUnmasked)
4831     Ops.push_back(Mask);
4832   Ops.push_back(VL);
4833 
4834   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4835   SDValue Result =
4836       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
4837   Chain = Result.getValue(1);
4838 
4839   if (VT.isFixedLengthVector())
4840     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4841 
4842   return DAG.getMergeValues({Result, Chain}, DL);
4843 }
4844 
4845 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
4846 // matched to a RVV indexed store. The RVV indexed store instructions only
4847 // support the "unsigned unscaled" addressing mode; indices are implicitly
4848 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
4849 // signed or scaled indexing is extended to the XLEN value type and scaled
4850 // accordingly.
4851 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
4852                                                 SelectionDAG &DAG) const {
4853   SDLoc DL(Op);
4854   const auto *MemSD = cast<MemSDNode>(Op.getNode());
4855   EVT MemVT = MemSD->getMemoryVT();
4856   MachineMemOperand *MMO = MemSD->getMemOperand();
4857   SDValue Chain = MemSD->getChain();
4858   SDValue BasePtr = MemSD->getBasePtr();
4859 
4860   bool IsTruncatingStore = false;
4861   SDValue Index, Mask, Val, VL;
4862 
4863   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
4864     Index = VPSN->getIndex();
4865     Mask = VPSN->getMask();
4866     Val = VPSN->getValue();
4867     VL = VPSN->getVectorLength();
4868     // VP doesn't support truncating stores.
4869     IsTruncatingStore = false;
4870   } else {
4871     // Else it must be a MSCATTER.
4872     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4873     Index = MSN->getIndex();
4874     Mask = MSN->getMask();
4875     Val = MSN->getValue();
4876     IsTruncatingStore = MSN->isTruncatingStore();
4877   }
4878 
4879   MVT VT = Val.getSimpleValueType();
4880   MVT IndexVT = Index.getSimpleValueType();
4881   MVT XLenVT = Subtarget.getXLenVT();
4882 
4883   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4884          "Unexpected VTs!");
4885   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
4886   // Targets have to explicitly opt-in for extending vector loads and
4887   // truncating vector stores.
4888   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
4889   (void)IsTruncatingStore;
4890 
4891   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4892   // the selection of the masked intrinsics doesn't do this for us.
4893   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4894 
4895   MVT ContainerVT = VT;
4896   if (VT.isFixedLengthVector()) {
4897     // We need to use the larger of the value and index type to determine the
4898     // scalable type to use so we don't increase LMUL for any operand/result.
4899     if (VT.bitsGE(IndexVT)) {
4900       ContainerVT = getContainerForFixedLengthVector(VT);
4901       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4902                                  ContainerVT.getVectorElementCount());
4903     } else {
4904       IndexVT = getContainerForFixedLengthVector(IndexVT);
4905       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4906                                      IndexVT.getVectorElementCount());
4907     }
4908 
4909     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4910     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4911 
4912     if (!IsUnmasked) {
4913       MVT MaskVT =
4914           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4915       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4916     }
4917   }
4918 
4919   if (!VL)
4920     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4921 
4922   unsigned IntID =
4923       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4924   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
4925   Ops.push_back(Val);
4926   Ops.push_back(BasePtr);
4927   Ops.push_back(Index);
4928   if (!IsUnmasked)
4929     Ops.push_back(Mask);
4930   Ops.push_back(VL);
4931 
4932   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
4933                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
4934 }
4935 
4936 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4937                                                SelectionDAG &DAG) const {
4938   const MVT XLenVT = Subtarget.getXLenVT();
4939   SDLoc DL(Op);
4940   SDValue Chain = Op->getOperand(0);
4941   SDValue SysRegNo = DAG.getConstant(
4942       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4943   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4944   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4945 
4946   // The encoding used for the rounding mode in RISCV differs from that used in
4947   // FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index in
4948   // a table, which consists of a sequence of 4-bit fields, each representing
4949   // the corresponding FLT_ROUNDS mode.
4950   static const int Table =
4951       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4952       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4953       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4954       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4955       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
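  // For example, if FRM holds RISCVFPRndMode::RDN, the shift below selects the
  // 4-bit field at index RDN, which holds int(RoundingMode::TowardNegative).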
4956 
4957   SDValue Shift =
4958       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4959   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4960                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4961   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4962                                DAG.getConstant(7, DL, XLenVT));
4963 
4964   return DAG.getMergeValues({Masked, Chain}, DL);
4965 }
4966 
4967 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4968                                                SelectionDAG &DAG) const {
4969   const MVT XLenVT = Subtarget.getXLenVT();
4970   SDLoc DL(Op);
4971   SDValue Chain = Op->getOperand(0);
4972   SDValue RMValue = Op->getOperand(1);
4973   SDValue SysRegNo = DAG.getConstant(
4974       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4975 
4976   // The encoding used for the rounding mode in RISCV differs from the one
4977   // used by FLT_ROUNDS. To convert between them, the C rounding mode is used
4978   // as an index into a table consisting of a sequence of 4-bit fields, each
4979   // holding the corresponding RISCV mode.
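  // For example, a requested mode of RoundingMode::TowardZero (0) selects the
  // 4-bit field at bit 0 of the table, which holds RISCVFPRndMode::RTZ (1),
  // the value to be written to the FRM register.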
4980   static const unsigned Table =
4981       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4982       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4983       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4984       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4985       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4986 
4987   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4988                               DAG.getConstant(2, DL, XLenVT));
4989   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4990                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4991   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4992                         DAG.getConstant(0x7, DL, XLenVT));
4993   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4994                      RMValue);
4995 }
4996 
4997 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4998 // form of the given Opcode.
4999 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
5000   switch (Opcode) {
5001   default:
5002     llvm_unreachable("Unexpected opcode");
5003   case ISD::SHL:
5004     return RISCVISD::SLLW;
5005   case ISD::SRA:
5006     return RISCVISD::SRAW;
5007   case ISD::SRL:
5008     return RISCVISD::SRLW;
5009   case ISD::SDIV:
5010     return RISCVISD::DIVW;
5011   case ISD::UDIV:
5012     return RISCVISD::DIVUW;
5013   case ISD::UREM:
5014     return RISCVISD::REMUW;
5015   case ISD::ROTL:
5016     return RISCVISD::ROLW;
5017   case ISD::ROTR:
5018     return RISCVISD::RORW;
5019   case RISCVISD::GREV:
5020     return RISCVISD::GREVW;
5021   case RISCVISD::GORC:
5022     return RISCVISD::GORCW;
5023   }
5024 }
5025 
5026 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
5027 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
5028 // otherwise be promoted to i64, making it difficult to select the
5029 // SLLW/DIVUW/.../*W instructions later on because the fact that the operation
5030 // was originally of type i8/i16/i32 is lost.
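// For example, with the default ANY_EXTEND this turns (i32 (srl X, Y)) into
// (i32 (trunc (SRLW (any_extend X), (any_extend Y)))).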
5031 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
5032                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
5033   SDLoc DL(N);
5034   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5035   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
5036   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
5037   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5038   // ReplaceNodeResults requires we maintain the same type for the return value.
5039   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5040 }
5041 
5042 // Converts the given 32-bit operation to an i64 operation with sign extension
5043 // semantics so that the number of sign extension instructions can be reduced.
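// For example, (i32 (add X, Y)) becomes
// (i32 (trunc (sext_inreg (add (any_extend X), (any_extend Y)), i32))), which
// can later be selected as a single ADDW.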
5044 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
5045   SDLoc DL(N);
5046   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5047   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5048   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
5049   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5050                                DAG.getValueType(MVT::i32));
5051   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
5052 }
5053 
5054 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
5055                                              SmallVectorImpl<SDValue> &Results,
5056                                              SelectionDAG &DAG) const {
5057   SDLoc DL(N);
5058   switch (N->getOpcode()) {
5059   default:
5060     llvm_unreachable("Don't know how to custom type legalize this operation!");
5061   case ISD::STRICT_FP_TO_SINT:
5062   case ISD::STRICT_FP_TO_UINT:
5063   case ISD::FP_TO_SINT:
5064   case ISD::FP_TO_UINT: {
5065     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5066            "Unexpected custom legalisation");
5067     bool IsStrict = N->isStrictFPOpcode();
5068     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
5069                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
5070     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
5071     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
5072         TargetLowering::TypeSoftenFloat) {
5073       // FIXME: Support strict FP.
5074       if (IsStrict)
5075         return;
5076       if (!isTypeLegal(Op0.getValueType()))
5077         return;
5078       unsigned Opc =
5079           IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
5080       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
5081       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5082       return;
5083     }
5084     // If the FP type needs to be softened, emit a library call using the 'si'
5085     // version. If we left it to default legalization, we'd end up with 'di'.
5086     // If the FP type doesn't need to be softened, just let generic type
5087     // legalization promote the result type.
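    // For example, an f64-to-i32 fptosi on a target where f64 must be softened
    // should become a call to __fixdfsi rather than __fixdfdi.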
5088     RTLIB::Libcall LC;
5089     if (IsSigned)
5090       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
5091     else
5092       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
5093     MakeLibCallOptions CallOptions;
5094     EVT OpVT = Op0.getValueType();
5095     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5096     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5097     SDValue Result;
5098     std::tie(Result, Chain) =
5099         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5100     Results.push_back(Result);
5101     if (IsStrict)
5102       Results.push_back(Chain);
5103     break;
5104   }
5105   case ISD::READCYCLECOUNTER: {
5106     assert(!Subtarget.is64Bit() &&
5107            "READCYCLECOUNTER only has custom type legalization on riscv32");
5108 
5109     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5110     SDValue RCW =
5111         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5112 
5113     Results.push_back(
5114         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
5115     Results.push_back(RCW.getValue(2));
5116     break;
5117   }
5118   case ISD::MUL: {
5119     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
5120     unsigned XLen = Subtarget.getXLen();
5121     // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
5122     if (Size > XLen) {
5123       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
5124       SDValue LHS = N->getOperand(0);
5125       SDValue RHS = N->getOperand(1);
5126       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
5127 
5128       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
5129       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
5130       // We need exactly one side to be unsigned.
5131       if (LHSIsU == RHSIsU)
5132         return;
5133 
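      // MULHSU computes the upper XLen bits of the product of a signed first
      // operand and an unsigned second operand, which is why MakeMULPair below
      // takes the signed value as S and the unsigned value as U.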
5134       auto MakeMULPair = [&](SDValue S, SDValue U) {
5135         MVT XLenVT = Subtarget.getXLenVT();
5136         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
5137         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
5138         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
5139         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
5140         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
5141       };
5142 
5143       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
5144       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
5145 
5146       // The other operand should be signed, but still prefer MULH when
5147       // possible.
5148       if (RHSIsU && LHSIsS && !RHSIsS)
5149         Results.push_back(MakeMULPair(LHS, RHS));
5150       else if (LHSIsU && RHSIsS && !LHSIsS)
5151         Results.push_back(MakeMULPair(RHS, LHS));
5152 
5153       return;
5154     }
5155     LLVM_FALLTHROUGH;
5156   }
5157   case ISD::ADD:
5158   case ISD::SUB:
5159     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5160            "Unexpected custom legalisation");
5161     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
5162     break;
5163   case ISD::SHL:
5164   case ISD::SRA:
5165   case ISD::SRL:
5166     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5167            "Unexpected custom legalisation");
5168     if (N->getOperand(1).getOpcode() != ISD::Constant) {
5169       Results.push_back(customLegalizeToWOp(N, DAG));
5170       break;
5171     }
5172 
5173     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
5174     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
5175     // shift amount.
5176     if (N->getOpcode() == ISD::SHL) {
5177       SDLoc DL(N);
5178       SDValue NewOp0 =
5179           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5180       SDValue NewOp1 =
5181           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
5182       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
5183       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5184                                    DAG.getValueType(MVT::i32));
5185       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5186     }
5187 
5188     break;
5189   case ISD::ROTL:
5190   case ISD::ROTR:
5191     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5192            "Unexpected custom legalisation");
5193     Results.push_back(customLegalizeToWOp(N, DAG));
5194     break;
5195   case ISD::CTTZ:
5196   case ISD::CTTZ_ZERO_UNDEF:
5197   case ISD::CTLZ:
5198   case ISD::CTLZ_ZERO_UNDEF: {
5199     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5200            "Unexpected custom legalisation");
5201 
5202     SDValue NewOp0 =
5203         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5204     bool IsCTZ =
5205         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
5206     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
5207     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
5208     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5209     return;
5210   }
5211   case ISD::SDIV:
5212   case ISD::UDIV:
5213   case ISD::UREM: {
5214     MVT VT = N->getSimpleValueType(0);
5215     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
5216            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
5217            "Unexpected custom legalisation");
5218     // Don't promote division/remainder by constant since we should expand
5219     // those to a multiply by a magic constant instead.
5220     // FIXME: What if the expansion is disabled for minsize?
5221     if (N->getOperand(1).getOpcode() == ISD::Constant)
5222       return;
5223 
5224     // If the input is i32, use ANY_EXTEND since the W instructions don't read
5225     // the upper 32 bits. For other types we need to sign or zero extend
5226     // based on the opcode.
5227     unsigned ExtOpc = ISD::ANY_EXTEND;
5228     if (VT != MVT::i32)
5229       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5230                                            : ISD::ZERO_EXTEND;
5231 
5232     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5233     break;
5234   }
5235   case ISD::UADDO:
5236   case ISD::USUBO: {
5237     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5238            "Unexpected custom legalisation");
5239     bool IsAdd = N->getOpcode() == ISD::UADDO;
5240     // Create an ADDW or SUBW.
5241     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5242     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5243     SDValue Res =
5244         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5245     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5246                       DAG.getValueType(MVT::i32));
5247 
5248     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
5249     // Since the inputs are sign extended from i32, this is equivalent to
5250     // comparing the lower 32 bits.
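    // For example, for (uaddo 0x80000000, 0x80000000) the sign-extended ADDW
    // result is 0, which is unsigned-less-than the sign-extended LHS
    // 0xFFFFFFFF80000000, so overflow is correctly reported.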
5251     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5252     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5253                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5254 
5255     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5256     Results.push_back(Overflow);
5257     return;
5258   }
5259   case ISD::UADDSAT:
5260   case ISD::USUBSAT: {
5261     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5262            "Unexpected custom legalisation");
5263     if (Subtarget.hasStdExtZbb()) {
5264       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5265       // sign extend allows overflow of the lower 32 bits to be detected on
5266       // the promoted size.
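      // For example, (uaddsat 0xFFFFFFFF, 1) becomes a 64-bit uaddsat of
      // 0xFFFFFFFFFFFFFFFF and 1, which saturates to all ones and truncates
      // back to the correct i32 result 0xFFFFFFFF.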
5267       SDValue LHS =
5268           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5269       SDValue RHS =
5270           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5271       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5272       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5273       return;
5274     }
5275 
5276     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5277     // promotion for UADDO/USUBO.
5278     Results.push_back(expandAddSubSat(N, DAG));
5279     return;
5280   }
5281   case ISD::BITCAST: {
5282     EVT VT = N->getValueType(0);
5283     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5284     SDValue Op0 = N->getOperand(0);
5285     EVT Op0VT = Op0.getValueType();
5286     MVT XLenVT = Subtarget.getXLenVT();
5287     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5288       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5289       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5290     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5291                Subtarget.hasStdExtF()) {
5292       SDValue FPConv =
5293           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5294       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5295     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5296                isTypeLegal(Op0VT)) {
5297       // Custom-legalize bitcasts from fixed-length vector types to illegal
5298       // scalar types in order to improve codegen. Bitcast the vector to a
5299       // one-element vector type whose element type is the same as the result
5300       // type, and extract the first element.
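      // For example, on RV32 an (i64 (bitcast (v2i32 X))) becomes
      // (extractelt (v1i64 (bitcast X)), 0).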
5301       LLVMContext &Context = *DAG.getContext();
5302       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
5303       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5304                                     DAG.getConstant(0, DL, XLenVT)));
5305     }
5306     break;
5307   }
5308   case RISCVISD::GREV:
5309   case RISCVISD::GORC: {
5310     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5311            "Unexpected custom legalisation");
5312     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5313     // This is similar to customLegalizeToWOp: any-extend both operands to i64
5314     // and truncate the result back to i32. The second operand is known to be
5315     // a constant shift amount.
5316     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5317     SDValue NewOp0 =
5318         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5319     SDValue NewOp1 =
5320         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5321     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5322     // ReplaceNodeResults requires we maintain the same type for the return
5323     // value.
5324     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5325     break;
5326   }
5327   case RISCVISD::SHFL: {
5328     // There is no SHFLIW instruction, but we can just promote the operation.
5329     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5330            "Unexpected custom legalisation");
5331     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5332     SDValue NewOp0 =
5333         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5334     SDValue NewOp1 =
5335         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5336     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5337     // ReplaceNodeResults requires we maintain the same type for the return
5338     // value.
5339     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5340     break;
5341   }
5342   case ISD::BSWAP:
5343   case ISD::BITREVERSE: {
5344     MVT VT = N->getSimpleValueType(0);
5345     MVT XLenVT = Subtarget.getXLenVT();
5346     assert((VT == MVT::i8 || VT == MVT::i16 ||
5347             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5348            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5349     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5350     unsigned Imm = VT.getSizeInBits() - 1;
5351     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5352     if (N->getOpcode() == ISD::BSWAP)
5353       Imm &= ~0x7U;
5354     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5355     SDValue GREVI =
5356         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5357     // ReplaceNodeResults requires we maintain the same type for the return
5358     // value.
5359     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5360     break;
5361   }
5362   case ISD::FSHL:
5363   case ISD::FSHR: {
5364     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5365            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5366     SDValue NewOp0 =
5367         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5368     SDValue NewOp1 =
5369         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5370     SDValue NewOp2 =
5371         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5372     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5373     // Mask the shift amount to 5 bits.
5374     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5375                          DAG.getConstant(0x1f, DL, MVT::i64));
5376     unsigned Opc =
5377         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5378     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5379     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5380     break;
5381   }
5382   case ISD::EXTRACT_VECTOR_ELT: {
5383     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
5384     // element type is illegal (currently only vXi64 on RV32).
5385     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5386     // transferred to the destination register. We issue two of these from the
5387     // upper and lower halves of the SEW-bit vector element, slid down to the
5388     // first element.
5389     SDValue Vec = N->getOperand(0);
5390     SDValue Idx = N->getOperand(1);
5391 
5392     // The vector type hasn't been legalized yet, so we can't issue
5393     // target-specific nodes if it needs legalization.
5394     // FIXME: We could manually legalize if it's important.
5395     if (!isTypeLegal(Vec.getValueType()))
5396       return;
5397 
5398     MVT VecVT = Vec.getSimpleValueType();
5399 
5400     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5401            VecVT.getVectorElementType() == MVT::i64 &&
5402            "Unexpected EXTRACT_VECTOR_ELT legalization");
5403 
5404     // If this is a fixed vector, we need to convert it to a scalable vector.
5405     MVT ContainerVT = VecVT;
5406     if (VecVT.isFixedLengthVector()) {
5407       ContainerVT = getContainerForFixedLengthVector(VecVT);
5408       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5409     }
5410 
5411     MVT XLenVT = Subtarget.getXLenVT();
5412 
5413     // Use a VL of 1 to avoid processing more elements than we need.
5414     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5415     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5416     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5417 
5418     // Unless the index is known to be 0, we must slide the vector down to get
5419     // the desired element into index 0.
5420     if (!isNullConstant(Idx)) {
5421       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5422                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5423     }
5424 
5425     // Extract the lower XLEN bits of the correct vector element.
5426     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5427 
5428     // To extract the upper XLEN bits of the vector element, shift the first
5429     // element right by 32 bits and re-extract the lower XLEN bits.
5430     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5431                                      DAG.getConstant(32, DL, XLenVT), VL);
5432     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5433                                  ThirtyTwoV, Mask, VL);
5434 
5435     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5436 
5437     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5438     break;
5439   }
5440   case ISD::INTRINSIC_WO_CHAIN: {
5441     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5442     switch (IntNo) {
5443     default:
5444       llvm_unreachable(
5445           "Don't know how to custom type legalize this intrinsic!");
5446     case Intrinsic::riscv_orc_b: {
5447       // Lower to the GORCI encoding for orc.b with the operand extended.
5448       SDValue NewOp =
5449           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5450       // If Zbp is enabled, use GORCIW which will sign extend the result.
5451       unsigned Opc =
5452           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5453       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5454                                 DAG.getConstant(7, DL, MVT::i64));
5455       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5456       return;
5457     }
5458     case Intrinsic::riscv_grev:
5459     case Intrinsic::riscv_gorc: {
5460       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5461              "Unexpected custom legalisation");
5462       SDValue NewOp1 =
5463           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5464       SDValue NewOp2 =
5465           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5466       unsigned Opc =
5467           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5468       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5469       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5470       break;
5471     }
5472     case Intrinsic::riscv_shfl:
5473     case Intrinsic::riscv_unshfl: {
5474       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5475              "Unexpected custom legalisation");
5476       SDValue NewOp1 =
5477           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5478       SDValue NewOp2 =
5479           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5480       unsigned Opc =
5481           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5482       if (isa<ConstantSDNode>(N->getOperand(2))) {
5483         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5484                              DAG.getConstant(0xf, DL, MVT::i64));
5485         Opc =
5486             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5487       }
5488       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5489       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5490       break;
5491     }
5492     case Intrinsic::riscv_bcompress:
5493     case Intrinsic::riscv_bdecompress: {
5494       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5495              "Unexpected custom legalisation");
5496       SDValue NewOp1 =
5497           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5498       SDValue NewOp2 =
5499           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5500       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5501                          ? RISCVISD::BCOMPRESSW
5502                          : RISCVISD::BDECOMPRESSW;
5503       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5504       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5505       break;
5506     }
5507     case Intrinsic::riscv_vmv_x_s: {
5508       EVT VT = N->getValueType(0);
5509       MVT XLenVT = Subtarget.getXLenVT();
5510       if (VT.bitsLT(XLenVT)) {
5511         // Simple case; just extract using vmv.x.s and truncate.
5512         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5513                                       Subtarget.getXLenVT(), N->getOperand(1));
5514         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5515         return;
5516       }
5517 
5518       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5519              "Unexpected custom legalization");
5520 
5521       // We need to do the move in two steps.
5522       SDValue Vec = N->getOperand(1);
5523       MVT VecVT = Vec.getSimpleValueType();
5524 
5525       // First extract the lower XLEN bits of the element.
5526       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5527 
5528       // To extract the upper XLEN bits of the vector element, shift the first
5529       // element right by 32 bits and re-extract the lower XLEN bits.
5530       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5531       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5532       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5533       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5534                                        DAG.getConstant(32, DL, XLenVT), VL);
5535       SDValue LShr32 =
5536           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5537       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5538 
5539       Results.push_back(
5540           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5541       break;
5542     }
5543     }
5544     break;
5545   }
5546   case ISD::VECREDUCE_ADD:
5547   case ISD::VECREDUCE_AND:
5548   case ISD::VECREDUCE_OR:
5549   case ISD::VECREDUCE_XOR:
5550   case ISD::VECREDUCE_SMAX:
5551   case ISD::VECREDUCE_UMAX:
5552   case ISD::VECREDUCE_SMIN:
5553   case ISD::VECREDUCE_UMIN:
5554     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5555       Results.push_back(V);
5556     break;
5557   case ISD::FLT_ROUNDS_: {
5558     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5559     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5560     Results.push_back(Res.getValue(0));
5561     Results.push_back(Res.getValue(1));
5562     break;
5563   }
5564   }
5565 }
5566 
5567 // A structure to hold one of the bit-manipulation patterns below. Together, a
5568 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5569 //   (or (and (shl x, 1), 0xAAAAAAAA),
5570 //       (and (srl x, 1), 0x55555555))
5571 struct RISCVBitmanipPat {
5572   SDValue Op;
5573   unsigned ShAmt;
5574   bool IsSHL;
5575 
5576   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5577     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5578   }
5579 };
5580 
5581 // Matches patterns of the form
5582 //   (and (shl x, C2), (C1 << C2))
5583 //   (and (srl x, C2), C1)
5584 //   (shl (and x, C1), C2)
5585 //   (srl (and x, (C1 << C2)), C2)
5586 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5587 // The expected masks for each shift amount are specified in BitmanipMasks where
5588 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
5589 // The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
5590 // whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
5591 // possible XLen is 64.
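// For example, with the GREVI masks, (and (srl x, 4), 0x0F0F0F0F) on an i32
// value matches with Op = x, ShAmt = 4 and IsSHL = false.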
5592 static Optional<RISCVBitmanipPat>
5593 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5594   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5595          "Unexpected number of masks");
5596   Optional<uint64_t> Mask;
5597   // Optionally consume a mask around the shift operation.
5598   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5599     Mask = Op.getConstantOperandVal(1);
5600     Op = Op.getOperand(0);
5601   }
5602   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5603     return None;
5604   bool IsSHL = Op.getOpcode() == ISD::SHL;
5605 
5606   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5607     return None;
5608   uint64_t ShAmt = Op.getConstantOperandVal(1);
5609 
5610   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5611   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5612     return None;
5613   // If we don't have enough masks for 64 bits, then we must be trying to
5614   // match SHFL, so we're only allowed to shift 1/4 of the width.
5615   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5616     return None;
5617 
5618   SDValue Src = Op.getOperand(0);
5619 
5620   // The expected mask is shifted left when the AND is found around SHL
5621   // patterns.
5622   //   ((x >> 1) & 0x55555555)
5623   //   ((x << 1) & 0xAAAAAAAA)
5624   bool SHLExpMask = IsSHL;
5625 
5626   if (!Mask) {
5627     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5628     // the mask is all ones: consume that now.
5629     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5630       Mask = Src.getConstantOperandVal(1);
5631       Src = Src.getOperand(0);
5632       // The expected mask is now in fact shifted left for SRL, so reverse the
5633       // decision.
5634       //   ((x & 0xAAAAAAAA) >> 1)
5635       //   ((x & 0x55555555) << 1)
5636       SHLExpMask = !SHLExpMask;
5637     } else {
5638       // Use a default shifted mask of all-ones if there's no AND, truncated
5639       // down to the expected width. This simplifies the logic later on.
5640       Mask = maskTrailingOnes<uint64_t>(Width);
5641       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5642     }
5643   }
5644 
5645   unsigned MaskIdx = Log2_32(ShAmt);
5646   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5647 
5648   if (SHLExpMask)
5649     ExpMask <<= ShAmt;
5650 
5651   if (Mask != ExpMask)
5652     return None;
5653 
5654   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5655 }
5656 
5657 // Matches any of the following bit-manipulation patterns:
5658 //   (and (shl x, 1), (0x55555555 << 1))
5659 //   (and (srl x, 1), 0x55555555)
5660 //   (shl (and x, 0x55555555), 1)
5661 //   (srl (and x, (0x55555555 << 1)), 1)
5662 // where the shift amount and mask may vary thus:
5663 //   [1]  = 0x55555555 / 0xAAAAAAAA
5664 //   [2]  = 0x33333333 / 0xCCCCCCCC
5665 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5666 //   [8]  = 0x00FF00FF / 0xFF00FF00
5667 //   [16] = 0x0000FFFF / 0xFFFF0000
5668 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5669 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5670   // These are the unshifted masks which we use to match bit-manipulation
5671   // patterns. They may be shifted left in certain circumstances.
5672   static const uint64_t BitmanipMasks[] = {
5673       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5674       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5675 
5676   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5677 }
5678 
5679 // Match the following pattern as a GREVI(W) operation
5680 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5681 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5682                                const RISCVSubtarget &Subtarget) {
5683   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5684   EVT VT = Op.getValueType();
5685 
5686   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5687     auto LHS = matchGREVIPat(Op.getOperand(0));
5688     auto RHS = matchGREVIPat(Op.getOperand(1));
5689     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5690       SDLoc DL(Op);
5691       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5692                          DAG.getConstant(LHS->ShAmt, DL, VT));
5693     }
5694   }
5695   return SDValue();
5696 }
5697 
5698 // Matches any of the following patterns as a GORCI(W) operation
5699 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5700 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5701 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5702 // Note that with the variant of 3.,
5703 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5704 // the inner pattern will first be matched as GREVI and then the outer
5705 // pattern will be matched to GORC via the first rule above.
5706 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5707 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5708                                const RISCVSubtarget &Subtarget) {
5709   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5710   EVT VT = Op.getValueType();
5711 
5712   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5713     SDLoc DL(Op);
5714     SDValue Op0 = Op.getOperand(0);
5715     SDValue Op1 = Op.getOperand(1);
5716 
5717     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5718       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5719           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5720           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5721         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5722       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5723       if ((Reverse.getOpcode() == ISD::ROTL ||
5724            Reverse.getOpcode() == ISD::ROTR) &&
5725           Reverse.getOperand(0) == X &&
5726           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5727         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5728         if (RotAmt == (VT.getSizeInBits() / 2))
5729           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5730                              DAG.getConstant(RotAmt, DL, VT));
5731       }
5732       return SDValue();
5733     };
5734 
5735     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5736     if (SDValue V = MatchOROfReverse(Op0, Op1))
5737       return V;
5738     if (SDValue V = MatchOROfReverse(Op1, Op0))
5739       return V;
5740 
5741     // OR is commutable so canonicalize its OR operand to the left
5742     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5743       std::swap(Op0, Op1);
5744     if (Op0.getOpcode() != ISD::OR)
5745       return SDValue();
5746     SDValue OrOp0 = Op0.getOperand(0);
5747     SDValue OrOp1 = Op0.getOperand(1);
5748     auto LHS = matchGREVIPat(OrOp0);
5749     // OR is commutable so swap the operands and try again: x might have been
5750     // on the left
5751     if (!LHS) {
5752       std::swap(OrOp0, OrOp1);
5753       LHS = matchGREVIPat(OrOp0);
5754     }
5755     auto RHS = matchGREVIPat(Op1);
5756     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5757       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5758                          DAG.getConstant(LHS->ShAmt, DL, VT));
5759     }
5760   }
5761   return SDValue();
5762 }
5763 
5764 // Matches any of the following bit-manipulation patterns:
5765 //   (and (shl x, 1), (0x22222222 << 1))
5766 //   (and (srl x, 1), 0x22222222)
5767 //   (shl (and x, 0x22222222), 1)
5768 //   (srl (and x, (0x22222222 << 1)), 1)
5769 // where the shift amount and mask may vary thus:
5770 //   [1]  = 0x22222222 / 0x44444444
5771 //   [2]  = 0x0C0C0C0C / 0x30303030
5772 //   [4]  = 0x00F000F0 / 0x0F000F00
5773 //   [8]  = 0x0000FF00 / 0x00FF0000
5774 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5775 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5776   // These are the unshifted masks which we use to match bit-manipulation
5777   // patterns. They may be shifted left in certain circumstances.
5778   static const uint64_t BitmanipMasks[] = {
5779       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5780       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5781 
5782   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5783 }
5784 
5785 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
5786 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5787                                const RISCVSubtarget &Subtarget) {
5788   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5789   EVT VT = Op.getValueType();
5790 
5791   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5792     return SDValue();
5793 
5794   SDValue Op0 = Op.getOperand(0);
5795   SDValue Op1 = Op.getOperand(1);
5796 
5797   // OR is commutable, so canonicalize the second OR to the LHS.
5798   if (Op0.getOpcode() != ISD::OR)
5799     std::swap(Op0, Op1);
5800   if (Op0.getOpcode() != ISD::OR)
5801     return SDValue();
5802 
5803   // We found an inner OR, so our operands are the operands of the inner OR
5804   // and the other operand of the outer OR.
5805   SDValue A = Op0.getOperand(0);
5806   SDValue B = Op0.getOperand(1);
5807   SDValue C = Op1;
5808 
5809   auto Match1 = matchSHFLPat(A);
5810   auto Match2 = matchSHFLPat(B);
5811 
5812   // If neither matched, we failed.
5813   if (!Match1 && !Match2)
5814     return SDValue();
5815 
5816   // We had at least one match. If one failed, try the remaining C operand.
5817   if (!Match1) {
5818     std::swap(A, C);
5819     Match1 = matchSHFLPat(A);
5820     if (!Match1)
5821       return SDValue();
5822   } else if (!Match2) {
5823     std::swap(B, C);
5824     Match2 = matchSHFLPat(B);
5825     if (!Match2)
5826       return SDValue();
5827   }
5828   assert(Match1 && Match2);
5829 
5830   // Make sure our matches pair up.
5831   if (!Match1->formsPairWith(*Match2))
5832     return SDValue();
5833 
5834   // All that remains is to make sure C is an AND with the same input, one
5835   // that masks out the bits that are being shuffled.
5836   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5837       C.getOperand(0) != Match1->Op)
5838     return SDValue();
5839 
5840   uint64_t Mask = C.getConstantOperandVal(1);
5841 
5842   static const uint64_t BitmanipMasks[] = {
5843       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5844       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5845   };
5846 
5847   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5848   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5849   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5850 
5851   if (Mask != ExpMask)
5852     return SDValue();
5853 
5854   SDLoc DL(Op);
5855   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5856                      DAG.getConstant(Match1->ShAmt, DL, VT));
5857 }
5858 
5859 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5860 // non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
5861 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
5862 // stage does not undo itself, but it is redundant.
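// For example, (GREVI (GREVI x, 1), 2) -> (GREVI x, 3) and
// (GREVI (GREVI x, 3), 3) -> x, while (GORCI (GORCI x, 1), 2) -> (GORCI x, 3).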
5863 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5864   SDValue Src = N->getOperand(0);
5865 
5866   if (Src.getOpcode() != N->getOpcode())
5867     return SDValue();
5868 
5869   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5870       !isa<ConstantSDNode>(Src.getOperand(1)))
5871     return SDValue();
5872 
5873   unsigned ShAmt1 = N->getConstantOperandVal(1);
5874   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5875   Src = Src.getOperand(0);
5876 
5877   unsigned CombinedShAmt;
5878   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5879     CombinedShAmt = ShAmt1 | ShAmt2;
5880   else
5881     CombinedShAmt = ShAmt1 ^ ShAmt2;
5882 
5883   if (CombinedShAmt == 0)
5884     return Src;
5885 
5886   SDLoc DL(N);
5887   return DAG.getNode(
5888       N->getOpcode(), DL, N->getValueType(0), Src,
5889       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5890 }
5891 
5892 // Combine a constant select operand into its use:
5893 //
5894 // (and (select cond, -1, c), x)
5895 //   -> (select cond, x, (and x, c))  [AllOnes=1]
5896 // (or  (select cond, 0, c), x)
5897 //   -> (select cond, x, (or x, c))  [AllOnes=0]
5898 // (xor (select cond, 0, c), x)
5899 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
5900 // (add (select cond, 0, c), x)
5901 //   -> (select cond, x, (add x, c))  [AllOnes=0]
5902 // (sub x, (select cond, 0, c))
5903 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
5904 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5905                                    SelectionDAG &DAG, bool AllOnes) {
5906   EVT VT = N->getValueType(0);
5907 
5908   // Skip vectors.
5909   if (VT.isVector())
5910     return SDValue();
5911 
5912   if ((Slct.getOpcode() != ISD::SELECT &&
5913        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
5914       !Slct.hasOneUse())
5915     return SDValue();
5916 
5917   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5918     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5919   };
5920 
5921   bool SwapSelectOps;
5922   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
5923   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
5924   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
5925   SDValue NonConstantVal;
5926   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5927     SwapSelectOps = false;
5928     NonConstantVal = FalseVal;
5929   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5930     SwapSelectOps = true;
5931     NonConstantVal = TrueVal;
5932   } else
5933     return SDValue();
5934 
5935   // Slct is now known to be the desired identity constant when CC is true.
5936   TrueVal = OtherOp;
5937   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5938   // Unless SwapSelectOps says the condition should be false.
5939   if (SwapSelectOps)
5940     std::swap(TrueVal, FalseVal);
5941 
5942   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
5943     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5944                        {Slct.getOperand(0), Slct.getOperand(1),
5945                         Slct.getOperand(2), TrueVal, FalseVal});
5946 
5947   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
5948                      {Slct.getOperand(0), TrueVal, FalseVal});
5949 }
5950 
5951 // Attempt combineSelectAndUse on each operand of a commutative operator N.
5952 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5953                                               bool AllOnes) {
5954   SDValue N0 = N->getOperand(0);
5955   SDValue N1 = N->getOperand(1);
5956   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
5957     return Result;
5958   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
5959     return Result;
5960   return SDValue();
5961 }
5962 
5963 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG) {
5964   // fold (add (select lhs, rhs, cc, 0, y), x) ->
5965   //      (select lhs, rhs, cc, x, (add x, y))
5966   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
5967 }
5968 
5969 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
5970   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
5971   //      (select lhs, rhs, cc, x, (sub x, y))
5972   SDValue N0 = N->getOperand(0);
5973   SDValue N1 = N->getOperand(1);
5974   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
5975 }
5976 
5977 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
5978   // fold (and (select lhs, rhs, cc, -1, y), x) ->
5979   //      (select lhs, rhs, cc, x, (and x, y))
5980   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
5981 }
5982 
5983 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
5984                                 const RISCVSubtarget &Subtarget) {
5985   if (Subtarget.hasStdExtZbp()) {
5986     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5987       return GREV;
5988     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5989       return GORC;
5990     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5991       return SHFL;
5992   }
5993 
5994   // fold (or (select cond, 0, y), x) ->
5995   //      (select cond, x, (or x, y))
5996   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
5997 }
5998 
5999 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
6000   // fold (xor (select cond, 0, y), x) ->
6001   //      (select cond, x, (xor x, y))
6002   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6003 }
6004 
6005 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
6006 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
6007 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
6008 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
6009 // ADDW/SUBW/MULW.
6010 static SDValue performANY_EXTENDCombine(SDNode *N,
6011                                         TargetLowering::DAGCombinerInfo &DCI,
6012                                         const RISCVSubtarget &Subtarget) {
6013   if (!Subtarget.is64Bit())
6014     return SDValue();
6015 
6016   SelectionDAG &DAG = DCI.DAG;
6017 
6018   SDValue Src = N->getOperand(0);
6019   EVT VT = N->getValueType(0);
6020   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
6021     return SDValue();
6022 
6023   // The opcode must be one that can implicitly sign_extend.
6024   // FIXME: Additional opcodes.
6025   switch (Src.getOpcode()) {
6026   default:
6027     return SDValue();
6028   case ISD::MUL:
6029     if (!Subtarget.hasStdExtM())
6030       return SDValue();
6031     LLVM_FALLTHROUGH;
6032   case ISD::ADD:
6033   case ISD::SUB:
6034     break;
6035   }
6036 
6037   // Only handle cases where the result is used by a CopyToReg. That likely
6038   // means the value is a liveout of the basic block. This helps prevent
6039   // infinite combine loops like PR51206.
6040   if (none_of(N->uses(),
6041               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
6042     return SDValue();
6043 
6044   SmallVector<SDNode *, 4> SetCCs;
6045   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
6046                             UE = Src.getNode()->use_end();
6047        UI != UE; ++UI) {
6048     SDNode *User = *UI;
6049     if (User == N)
6050       continue;
6051     if (UI.getUse().getResNo() != Src.getResNo())
6052       continue;
6053     // All i32 setccs are legalized by sign extending operands.
6054     if (User->getOpcode() == ISD::SETCC) {
6055       SetCCs.push_back(User);
6056       continue;
6057     }
6058     // We don't know if we can extend this user.
6059     break;
6060   }
6061 
6062   // If we don't have any SetCCs, this isn't worthwhile.
6063   if (SetCCs.empty())
6064     return SDValue();
6065 
6066   SDLoc DL(N);
6067   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
6068   DCI.CombineTo(N, SExt);
6069 
6070   // Promote all the setccs.
6071   for (SDNode *SetCC : SetCCs) {
6072     SmallVector<SDValue, 4> Ops;
6073 
6074     for (unsigned j = 0; j != 2; ++j) {
6075       SDValue SOp = SetCC->getOperand(j);
6076       if (SOp == Src)
6077         Ops.push_back(SExt);
6078       else
6079         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
6080     }
6081 
6082     Ops.push_back(SetCC->getOperand(2));
6083     DCI.CombineTo(SetCC,
6084                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6085   }
6086   return SDValue(N, 0);
6087 }
6088 
6089 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
6090                                                DAGCombinerInfo &DCI) const {
6091   SelectionDAG &DAG = DCI.DAG;
6092 
6093   // Helper to call SimplifyDemandedBits on an operand of N where only some low
6094   // bits are demanded. N will be added to the Worklist if it was not deleted.
6095   // Caller should return SDValue(N, 0) if this returns true.
6096   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
6097     SDValue Op = N->getOperand(OpNo);
6098     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
6099     if (!SimplifyDemandedBits(Op, Mask, DCI))
6100       return false;
6101 
6102     if (N->getOpcode() != ISD::DELETED_NODE)
6103       DCI.AddToWorklist(N);
6104     return true;
6105   };
6106 
6107   switch (N->getOpcode()) {
6108   default:
6109     break;
6110   case RISCVISD::SplitF64: {
6111     SDValue Op0 = N->getOperand(0);
6112     // If the input to SplitF64 is just BuildPairF64 then the operation is
6113     // redundant. Instead, use BuildPairF64's operands directly.
6114     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
6115       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
6116 
6117     SDLoc DL(N);
6118 
6119     // It's cheaper to materialise two 32-bit integers than to load a double
6120     // from the constant pool and transfer it to integer registers through the
6121     // stack.
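    // For example, splitting the f64 constant 1.0 yields the pair
    // Lo = 0x00000000 and Hi = 0x3FF00000.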
6122     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
6123       APInt V = C->getValueAPF().bitcastToAPInt();
6124       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
6125       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
6126       return DCI.CombineTo(N, Lo, Hi);
6127     }
6128 
6129     // This is a target-specific version of a DAGCombine performed in
6130     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6131     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6132     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6133     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6134         !Op0.getNode()->hasOneUse())
6135       break;
6136     SDValue NewSplitF64 =
6137         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
6138                     Op0.getOperand(0));
6139     SDValue Lo = NewSplitF64.getValue(0);
6140     SDValue Hi = NewSplitF64.getValue(1);
6141     APInt SignBit = APInt::getSignMask(32);
6142     if (Op0.getOpcode() == ISD::FNEG) {
6143       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
6144                                   DAG.getConstant(SignBit, DL, MVT::i32));
6145       return DCI.CombineTo(N, Lo, NewHi);
6146     }
6147     assert(Op0.getOpcode() == ISD::FABS);
6148     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
6149                                 DAG.getConstant(~SignBit, DL, MVT::i32));
6150     return DCI.CombineTo(N, Lo, NewHi);
6151   }
6152   case RISCVISD::SLLW:
6153   case RISCVISD::SRAW:
6154   case RISCVISD::SRLW:
6155   case RISCVISD::ROLW:
6156   case RISCVISD::RORW: {
6157     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6158     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6159         SimplifyDemandedLowBitsHelper(1, 5))
6160       return SDValue(N, 0);
6161     break;
6162   }
6163   case RISCVISD::CLZW:
6164   case RISCVISD::CTZW: {
6165     // Only the lower 32 bits of the first operand are read
6166     if (SimplifyDemandedLowBitsHelper(0, 32))
6167       return SDValue(N, 0);
6168     break;
6169   }
6170   case RISCVISD::FSL:
6171   case RISCVISD::FSR: {
6172     // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
6173     unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
6174     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6175     if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
6176       return SDValue(N, 0);
6177     break;
6178   }
6179   case RISCVISD::FSLW:
6180   case RISCVISD::FSRW: {
6181     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
6182     // read.
6183     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6184         SimplifyDemandedLowBitsHelper(1, 32) ||
6185         SimplifyDemandedLowBitsHelper(2, 6))
6186       return SDValue(N, 0);
6187     break;
6188   }
6189   case RISCVISD::GREV:
6190   case RISCVISD::GORC: {
6191     // Only the lower log2(Bitwidth) bits of the shift amount are read.
6192     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6193     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6194     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
6195       return SDValue(N, 0);
6196 
6197     return combineGREVI_GORCI(N, DCI.DAG);
6198   }
6199   case RISCVISD::GREVW:
6200   case RISCVISD::GORCW: {
6201     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6202     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6203         SimplifyDemandedLowBitsHelper(1, 5))
6204       return SDValue(N, 0);
6205 
6206     return combineGREVI_GORCI(N, DCI.DAG);
6207   }
6208   case RISCVISD::SHFL:
6209   case RISCVISD::UNSHFL: {
6210     // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
6211     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6212     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6213     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
6214       return SDValue(N, 0);
6215 
6216     break;
6217   }
6218   case RISCVISD::SHFLW:
6219   case RISCVISD::UNSHFLW: {
6220     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6225     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6226         SimplifyDemandedLowBitsHelper(1, 4))
6227       return SDValue(N, 0);
6228 
6229     break;
6230   }
6231   case RISCVISD::BCOMPRESSW:
6232   case RISCVISD::BDECOMPRESSW: {
6233     // Only the lower 32 bits of LHS and RHS are read.
6234     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6235         SimplifyDemandedLowBitsHelper(1, 32))
6236       return SDValue(N, 0);
6237 
6238     break;
6239   }
6240   case RISCVISD::FMV_X_ANYEXTH:
6241   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6242     SDLoc DL(N);
6243     SDValue Op0 = N->getOperand(0);
6244     MVT VT = N->getSimpleValueType(0);
6245     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6246     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
6247     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
6248     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
6249          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
6250         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
6251          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
6252       assert(Op0.getOperand(0).getValueType() == VT &&
6253              "Unexpected value type!");
6254       return Op0.getOperand(0);
6255     }
6256 
6257     // This is a target-specific version of a DAGCombine performed in
6258     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6259     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6260     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6261     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6262         !Op0.getNode()->hasOneUse())
6263       break;
6264     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
6265     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
6266     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
6267     if (Op0.getOpcode() == ISD::FNEG)
6268       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
6269                          DAG.getConstant(SignBit, DL, VT));
6270 
6271     assert(Op0.getOpcode() == ISD::FABS);
6272     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
6273                        DAG.getConstant(~SignBit, DL, VT));
6274   }
6275   case ISD::ADD:
6276     return performADDCombine(N, DAG);
6277   case ISD::SUB:
6278     return performSUBCombine(N, DAG);
6279   case ISD::AND:
6280     return performANDCombine(N, DAG);
6281   case ISD::OR:
6282     return performORCombine(N, DAG, Subtarget);
6283   case ISD::XOR:
6284     return performXORCombine(N, DAG);
6285   case ISD::ANY_EXTEND:
6286     return performANY_EXTENDCombine(N, DCI, Subtarget);
6287   case ISD::ZERO_EXTEND:
6288     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
6289     // type legalization. This is safe because fp_to_uint produces poison if
6290     // it overflows.
6291     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
6292         N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
6293         isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
6294       return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
6295                          N->getOperand(0).getOperand(0));
6296     return SDValue();
6297   case RISCVISD::SELECT_CC: {
6298     // Try to fold this SELECT_CC node into a simpler form.
6299     SDValue LHS = N->getOperand(0);
6300     SDValue RHS = N->getOperand(1);
6301     SDValue TrueV = N->getOperand(3);
6302     SDValue FalseV = N->getOperand(4);
6303 
6304     // If the True and False values are the same, we don't need a select_cc.
6305     if (TrueV == FalseV)
6306       return TrueV;
6307 
6308     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
6309     if (!ISD::isIntEqualitySetCC(CCVal))
6310       break;
6311 
6312     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6313     //      (select_cc X, Y, lt, trueV, falseV)
6314     // Sometimes the setcc is introduced after select_cc has been formed.
6315     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6316         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6317       // If we're looking for eq 0 instead of ne 0, we need to invert the
6318       // condition.
6319       bool Invert = CCVal == ISD::SETEQ;
6320       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6321       if (Invert)
6322         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6323 
6324       SDLoc DL(N);
6325       RHS = LHS.getOperand(1);
6326       LHS = LHS.getOperand(0);
6327       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6328 
6329       SDValue TargetCC = DAG.getCondCode(CCVal);
6330       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6331                          {LHS, RHS, TargetCC, TrueV, FalseV});
6332     }
6333 
6334     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6335     //      (select_cc X, Y, eq/ne, trueV, falseV)
6336     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6337       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6338                          {LHS.getOperand(0), LHS.getOperand(1),
6339                           N->getOperand(2), TrueV, FalseV});
6340     // (select_cc X, 1, setne, trueV, falseV) ->
6341     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6342     // This can occur when legalizing some floating point comparisons.
6343     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6344     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6345       SDLoc DL(N);
6346       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6347       SDValue TargetCC = DAG.getCondCode(CCVal);
6348       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6349       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6350                          {LHS, RHS, TargetCC, TrueV, FalseV});
6351     }
6352 
6353     break;
6354   }
6355   case RISCVISD::BR_CC: {
6356     SDValue LHS = N->getOperand(1);
6357     SDValue RHS = N->getOperand(2);
6358     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6359     if (!ISD::isIntEqualitySetCC(CCVal))
6360       break;
6361 
6362     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6363     //      (br_cc X, Y, lt, dest)
6364     // Sometimes the setcc is introduced after br_cc has been formed.
6365     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6366         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6367       // If we're looking for eq 0 instead of ne 0, we need to invert the
6368       // condition.
6369       bool Invert = CCVal == ISD::SETEQ;
6370       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6371       if (Invert)
6372         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6373 
6374       SDLoc DL(N);
6375       RHS = LHS.getOperand(1);
6376       LHS = LHS.getOperand(0);
6377       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6378 
6379       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6380                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6381                          N->getOperand(4));
6382     }
6383 
6384     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
6385     //      (br_cc X, Y, eq/ne, dest)
6386     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6387       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6388                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6389                          N->getOperand(3), N->getOperand(4));
6390 
6391     // (br_cc X, 1, setne, dest) ->
6392     // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6393     // This can occur when legalizing some floating point comparisons.
6394     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6395     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6396       SDLoc DL(N);
6397       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6398       SDValue TargetCC = DAG.getCondCode(CCVal);
6399       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6400       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6401                          N->getOperand(0), LHS, RHS, TargetCC,
6402                          N->getOperand(4));
6403     }
6404     break;
6405   }
6406   case ISD::FCOPYSIGN: {
6407     EVT VT = N->getValueType(0);
6408     if (!VT.isVector())
6409       break;
6410     // There is a form of VFSGNJ which injects the negated sign of its second
6411     // operand. Try to bubble any FNEG up after the extend/round to produce
6412     // this optimized pattern. Avoid modifying cases where the FP_ROUND has
6413     // TRUNC=1.
6414     SDValue In2 = N->getOperand(1);
6415     // Avoid cases where the extend/round has multiple uses, as duplicating
6416     // those is typically more expensive than removing a fneg.
6417     if (!In2.hasOneUse())
6418       break;
6419     if (In2.getOpcode() != ISD::FP_EXTEND &&
6420         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6421       break;
6422     In2 = In2.getOperand(0);
6423     if (In2.getOpcode() != ISD::FNEG)
6424       break;
6425     SDLoc DL(N);
6426     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6427     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6428                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6429   }
6430   case ISD::MGATHER:
6431   case ISD::MSCATTER:
6432   case ISD::VP_GATHER:
6433   case ISD::VP_SCATTER: {
6434     if (!DCI.isBeforeLegalize())
6435       break;
6436     SDValue Index, ScaleOp;
6437     bool IsIndexScaled = false;
6438     bool IsIndexSigned = false;
6439     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
6440       Index = VPGSN->getIndex();
6441       ScaleOp = VPGSN->getScale();
6442       IsIndexScaled = VPGSN->isIndexScaled();
6443       IsIndexSigned = VPGSN->isIndexSigned();
6444     } else {
6445       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
6446       Index = MGSN->getIndex();
6447       ScaleOp = MGSN->getScale();
6448       IsIndexScaled = MGSN->isIndexScaled();
6449       IsIndexSigned = MGSN->isIndexSigned();
6450     }
6451     EVT IndexVT = Index.getValueType();
6452     MVT XLenVT = Subtarget.getXLenVT();
6453     // RISCV indexed loads and stores only support the "unsigned unscaled"
6454     // addressing mode, so anything else must be manually legalized.
6455     bool NeedsIdxLegalization =
6456         IsIndexScaled ||
6457         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
6458     if (!NeedsIdxLegalization)
6459       break;
6460 
6461     SDLoc DL(N);
6462 
6463     // Any index legalization should first promote to XLenVT, so we don't lose
6464     // bits when scaling. This may create an illegal index type so we let
6465     // LLVM's legalization take care of the splitting.
6466     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
6467     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6468       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6469       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
6470                           DL, IndexVT, Index);
6471     }
6472 
6473     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
6474     if (IsIndexScaled && Scale != 1) {
6475       // Manually scale the indices by the element size.
6476       // TODO: Sanitize the scale operand here?
6477       // TODO: For VP nodes, should we use VP_SHL here?
6478       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
6479       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6480       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6481     }
6482 
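    // Rebuild the gather/scatter with the legalized index and the unsigned,
    // unscaled addressing mode, leaving the other operands unchanged.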
6483     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6484     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
6485       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
6486                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
6487                               VPGN->getScale(), VPGN->getMask(),
6488                               VPGN->getVectorLength()},
6489                              VPGN->getMemOperand(), NewIndexTy);
6490     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
6491       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
6492                               {VPSN->getChain(), VPSN->getValue(),
6493                                VPSN->getBasePtr(), Index, VPSN->getScale(),
6494                                VPSN->getMask(), VPSN->getVectorLength()},
6495                               VPSN->getMemOperand(), NewIndexTy);
6496     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
6497       return DAG.getMaskedGather(
6498           N->getVTList(), MGN->getMemoryVT(), DL,
6499           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
6500            MGN->getBasePtr(), Index, MGN->getScale()},
6501           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6502     const auto *MSN = cast<MaskedScatterSDNode>(N);
6503     return DAG.getMaskedScatter(
6504         N->getVTList(), MSN->getMemoryVT(), DL,
6505         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
6506          Index, MSN->getScale()},
6507         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6508   }
6509   case RISCVISD::SRA_VL:
6510   case RISCVISD::SRL_VL:
6511   case RISCVISD::SHL_VL: {
6512     SDValue ShAmt = N->getOperand(1);
6513     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6514       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6515       SDLoc DL(N);
6516       SDValue VL = N->getOperand(3);
6517       EVT VT = N->getValueType(0);
6518       ShAmt =
6519           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6520       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6521                          N->getOperand(2), N->getOperand(3));
6522     }
6523     break;
6524   }
6525   case ISD::SRA:
6526   case ISD::SRL:
6527   case ISD::SHL: {
6528     SDValue ShAmt = N->getOperand(1);
6529     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6530       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6531       SDLoc DL(N);
6532       EVT VT = N->getValueType(0);
6533       ShAmt =
6534           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6535       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6536     }
6537     break;
6538   }
6539   case RISCVISD::MUL_VL: {
6540     // Try to form VWMUL or VWMULU.
6541     // FIXME: Look for splat of extended scalar as well.
6542     // FIXME: Support VWMULSU.
6543     SDValue Op0 = N->getOperand(0);
6544     SDValue Op1 = N->getOperand(1);
6545     bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6546     bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6547     if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
6548       return SDValue();
6549 
6550     // Make sure the extends have a single use.
6551     if (!Op0.hasOneUse() || !Op1.hasOneUse())
6552       return SDValue();
6553 
6554     SDValue Mask = N->getOperand(2);
6555     SDValue VL = N->getOperand(3);
6556     if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
6557         Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
6558       return SDValue();
6559 
6560     Op0 = Op0.getOperand(0);
6561     Op1 = Op1.getOperand(0);
6562 
6563     MVT VT = N->getSimpleValueType(0);
6564     MVT NarrowVT =
6565         MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
6566                          VT.getVectorElementCount());
6567 
6568     SDLoc DL(N);
6569 
6570     // Re-introduce narrower extends if needed.
6571     unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6572     if (Op0.getValueType() != NarrowVT)
6573       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6574     if (Op1.getValueType() != NarrowVT)
6575       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6576 
6577     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6578     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6579   }
6580   }
6581 
6582   return SDValue();
6583 }
6584 
6585 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6586     const SDNode *N, CombineLevel Level) const {
6587   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6588   // materialised in fewer instructions than `(OP _, c1)`:
6589   //
6590   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6591   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
6592   SDValue N0 = N->getOperand(0);
6593   EVT Ty = N0.getValueType();
6594   if (Ty.isScalarInteger() &&
6595       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6596     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6597     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6598     if (C1 && C2) {
6599       const APInt &C1Int = C1->getAPIntValue();
6600       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6601 
6602       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6603       // and the combine should happen, to potentially allow further combines
6604       // later.
6605       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6606           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6607         return true;
6608 
6609       // We can materialise `c1` in an add immediate, so it's "free", and the
6610       // combine should be prevented.
6611       if (C1Int.getMinSignedBits() <= 64 &&
6612           isLegalAddImmediate(C1Int.getSExtValue()))
6613         return false;
6614 
6615       // Neither constant will fit into an immediate, so find materialisation
6616       // costs.
6617       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6618                                               Subtarget.getFeatureBits(),
6619                                               /*CompressionCost*/true);
6620       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6621           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
6622           /*CompressionCost*/true);
6623 
6624       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6625       // combine should be prevented.
6626       if (C1Cost < ShiftedC1Cost)
6627         return false;
6628     }
6629   }
6630   return true;
6631 }
6632 
6633 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6634     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6635     TargetLoweringOpt &TLO) const {
6636   // Delay this optimization as late as possible.
6637   if (!TLO.LegalOps)
6638     return false;
6639 
6640   EVT VT = Op.getValueType();
6641   if (VT.isVector())
6642     return false;
6643 
6644   // Only handle AND for now.
6645   if (Op.getOpcode() != ISD::AND)
6646     return false;
6647 
6648   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6649   if (!C)
6650     return false;
6651 
6652   const APInt &Mask = C->getAPIntValue();
6653 
6654   // Clear all non-demanded bits initially.
6655   APInt ShrunkMask = Mask & DemandedBits;
6656 
6657   // Try to make a smaller immediate by setting undemanded bits.
6658 
6659   APInt ExpandedMask = Mask | ~DemandedBits;
6660 
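  // A candidate mask is usable if it keeps every bit of ShrunkMask set and
  // sets no bits outside ExpandedMask (only bits from the original mask or
  // bits that are not demanded).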
6661   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6662     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6663   };
6664   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6665     if (NewMask == Mask)
6666       return true;
6667     SDLoc DL(Op);
6668     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6669     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6670     return TLO.CombineTo(Op, NewOp);
6671   };
6672 
6673   // If the shrunk mask fits in sign extended 12 bits, let the target
6674   // independent code apply it.
6675   if (ShrunkMask.isSignedIntN(12))
6676     return false;
6677 
6678   // Preserve (and X, 0xffff) when zext.h is supported.
6679   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6680     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6681     if (IsLegalMask(NewMask))
6682       return UseMask(NewMask);
6683   }
6684 
6685   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6686   if (VT == MVT::i64) {
6687     APInt NewMask = APInt(64, 0xffffffff);
6688     if (IsLegalMask(NewMask))
6689       return UseMask(NewMask);
6690   }
6691 
6692   // For the remaining optimizations, we need to be able to make a negative
6693   // number through a combination of mask and undemanded bits.
6694   if (!ExpandedMask.isNegative())
6695     return false;
6696 
6697   // Compute the minimum number of bits needed for the negative number.
6698   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6699 
6700   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6701   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
6702   APInt NewMask = ShrunkMask;
6703   if (MinSignedBits <= 12)
6704     NewMask.setBitsFrom(11);
6705   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6706     NewMask.setBitsFrom(31);
6707   else
6708     return false;
6709 
6710   // Sanity check that our new mask is a subset of the demanded mask.
6711   assert(IsLegalMask(NewMask));
6712   return UseMask(NewMask);
6713 }
6714 
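// Apply the generalized bit-reverse permutation performed by GREV to a
// constant. Each set bit of ShAmt enables one swap stage: bit 0 swaps
// adjacent bits, bit 1 swaps adjacent 2-bit groups, and so on up to 32-bit
// halves.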
6715 static void computeGREV(APInt &Src, unsigned ShAmt) {
6716   ShAmt &= Src.getBitWidth() - 1;
6717   uint64_t x = Src.getZExtValue();
6718   if (ShAmt & 1)
6719     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6720   if (ShAmt & 2)
6721     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6722   if (ShAmt & 4)
6723     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6724   if (ShAmt & 8)
6725     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6726   if (ShAmt & 16)
6727     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6728   if (ShAmt & 32)
6729     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6730   Src = x;
6731 }
6732 
6733 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6734                                                         KnownBits &Known,
6735                                                         const APInt &DemandedElts,
6736                                                         const SelectionDAG &DAG,
6737                                                         unsigned Depth) const {
6738   unsigned BitWidth = Known.getBitWidth();
6739   unsigned Opc = Op.getOpcode();
6740   assert((Opc >= ISD::BUILTIN_OP_END ||
6741           Opc == ISD::INTRINSIC_WO_CHAIN ||
6742           Opc == ISD::INTRINSIC_W_CHAIN ||
6743           Opc == ISD::INTRINSIC_VOID) &&
6744          "Should use MaskedValueIsZero if you don't know whether Op"
6745          " is a target node!");
6746 
6747   Known.resetAll();
6748   switch (Opc) {
6749   default: break;
6750   case RISCVISD::SELECT_CC: {
6751     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6752     // If we don't know any bits, early out.
6753     if (Known.isUnknown())
6754       break;
6755     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6756 
6757     // Only known if known in both the LHS and RHS.
6758     Known = KnownBits::commonBits(Known, Known2);
6759     break;
6760   }
6761   case RISCVISD::REMUW: {
6762     KnownBits Known2;
6763     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6764     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6765     // We only care about the lower 32 bits.
6766     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6767     // Restore the original width by sign extending.
6768     Known = Known.sext(BitWidth);
6769     break;
6770   }
6771   case RISCVISD::DIVUW: {
6772     KnownBits Known2;
6773     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6774     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6775     // We only care about the lower 32 bits.
6776     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6777     // Restore the original width by sign extending.
6778     Known = Known.sext(BitWidth);
6779     break;
6780   }
6781   case RISCVISD::CTZW: {
6782     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6783     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6784     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6785     Known.Zero.setBitsFrom(LowBits);
6786     break;
6787   }
6788   case RISCVISD::CLZW: {
6789     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6790     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6791     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6792     Known.Zero.setBitsFrom(LowBits);
6793     break;
6794   }
6795   case RISCVISD::GREV:
6796   case RISCVISD::GREVW: {
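    // With a constant control word, GREV is a fixed bit permutation, so the
    // known-zero and known-one bits can be permuted in the same way.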
6797     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6798       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6799       if (Opc == RISCVISD::GREVW)
6800         Known = Known.trunc(32);
6801       unsigned ShAmt = C->getZExtValue();
6802       computeGREV(Known.Zero, ShAmt);
6803       computeGREV(Known.One, ShAmt);
6804       if (Opc == RISCVISD::GREVW)
6805         Known = Known.sext(BitWidth);
6806     }
6807     break;
6808   }
6809   case RISCVISD::READ_VLENB:
6810     // We assume VLENB is at least 16 bytes.
6811     Known.Zero.setLowBits(4);
6812     // We assume VLENB is no more than 65536 / 8 bytes.
6813     Known.Zero.setBitsFrom(14);
6814     break;
6815   case ISD::INTRINSIC_W_CHAIN: {
6816     unsigned IntNo = Op.getConstantOperandVal(1);
6817     switch (IntNo) {
6818     default:
6819       // We can't do anything for most intrinsics.
6820       break;
6821     case Intrinsic::riscv_vsetvli:
6822     case Intrinsic::riscv_vsetvlimax:
6823       // Assume that VL output is positive and would fit in an int32_t.
6824       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6825       if (BitWidth >= 32)
6826         Known.Zero.setBitsFrom(31);
6827       break;
6828     }
6829     break;
6830   }
6831   }
6832 }
6833 
6834 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6835     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6836     unsigned Depth) const {
6837   switch (Op.getOpcode()) {
6838   default:
6839     break;
6840   case RISCVISD::SELECT_CC: {
6841     unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
6842     if (Tmp == 1) return 1;  // Early out.
6843     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
6844     return std::min(Tmp, Tmp2);
6845   }
6846   case RISCVISD::SLLW:
6847   case RISCVISD::SRAW:
6848   case RISCVISD::SRLW:
6849   case RISCVISD::DIVW:
6850   case RISCVISD::DIVUW:
6851   case RISCVISD::REMUW:
6852   case RISCVISD::ROLW:
6853   case RISCVISD::RORW:
6854   case RISCVISD::GREVW:
6855   case RISCVISD::GORCW:
6856   case RISCVISD::FSLW:
6857   case RISCVISD::FSRW:
6858   case RISCVISD::SHFLW:
6859   case RISCVISD::UNSHFLW:
6860   case RISCVISD::BCOMPRESSW:
6861   case RISCVISD::BDECOMPRESSW:
6862   case RISCVISD::FCVT_W_RTZ_RV64:
6863   case RISCVISD::FCVT_WU_RTZ_RV64:
6864     // TODO: As the result is sign-extended, this is conservatively correct. A
6865     // more precise answer could be calculated for SRAW depending on known
6866     // bits in the shift amount.
6867     return 33;
6868   case RISCVISD::SHFL:
6869   case RISCVISD::UNSHFL: {
6870     // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
6871     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6872     // will stay within the upper 32 bits. If there were more than 32 sign bits
6873     // before there will be at least 33 sign bits after.
6874     if (Op.getValueType() == MVT::i64 &&
6875         isa<ConstantSDNode>(Op.getOperand(1)) &&
6876         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6877       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6878       if (Tmp > 32)
6879         return 33;
6880     }
6881     break;
6882   }
6883   case RISCVISD::VMV_X_S:
6884     // The number of sign bits of the scalar result is computed by obtaining the
6885     // element type of the input vector operand, subtracting its width from the
6886     // XLEN, and then adding one (sign bit within the element type). If the
6887     // element type is wider than XLen, the least-significant XLEN bits are
6888     // taken.
6889     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6890       return 1;
6891     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6892   }
6893 
6894   return 1;
6895 }
6896 
6897 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6898                                                   MachineBasicBlock *BB) {
6899   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6900 
6901   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6902   // Should the count have wrapped while it was being read, we need to try
6903   // again.
6904   // ...
6905   // read:
6906   // rdcycleh x3 # load high word of cycle
6907   // rdcycle  x2 # load low word of cycle
6908   // rdcycleh x4 # load high word of cycle
6909   // bne x3, x4, read # check if high word reads match, otherwise try again
6910   // ...
6911 
6912   MachineFunction &MF = *BB->getParent();
6913   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6914   MachineFunction::iterator It = ++BB->getIterator();
6915 
6916   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6917   MF.insert(It, LoopMBB);
6918 
6919   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6920   MF.insert(It, DoneMBB);
6921 
6922   // Transfer the remainder of BB and its successor edges to DoneMBB.
6923   DoneMBB->splice(DoneMBB->begin(), BB,
6924                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6925   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6926 
6927   BB->addSuccessor(LoopMBB);
6928 
6929   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6930   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6931   Register LoReg = MI.getOperand(0).getReg();
6932   Register HiReg = MI.getOperand(1).getReg();
6933   DebugLoc DL = MI.getDebugLoc();
6934 
6935   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6936   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6937       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6938       .addReg(RISCV::X0);
6939   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6940       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6941       .addReg(RISCV::X0);
6942   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6943       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6944       .addReg(RISCV::X0);
6945 
6946   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6947       .addReg(HiReg)
6948       .addReg(ReadAgainReg)
6949       .addMBB(LoopMBB);
6950 
6951   LoopMBB->addSuccessor(LoopMBB);
6952   LoopMBB->addSuccessor(DoneMBB);
6953 
6954   MI.eraseFromParent();
6955 
6956   return DoneMBB;
6957 }
6958 
6959 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6960                                              MachineBasicBlock *BB) {
6961   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6962 
6963   MachineFunction &MF = *BB->getParent();
6964   DebugLoc DL = MI.getDebugLoc();
6965   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6966   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6967   Register LoReg = MI.getOperand(0).getReg();
6968   Register HiReg = MI.getOperand(1).getReg();
6969   Register SrcReg = MI.getOperand(2).getReg();
6970   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6971   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6972 
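  // Split the f64 by spilling the FPR64 source register to a stack slot and
  // reloading the low and high 32-bit halves into GPRs with two LW loads.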
6973   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6974                           RI);
6975   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6976   MachineMemOperand *MMOLo =
6977       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6978   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6979       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6980   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6981       .addFrameIndex(FI)
6982       .addImm(0)
6983       .addMemOperand(MMOLo);
6984   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6985       .addFrameIndex(FI)
6986       .addImm(4)
6987       .addMemOperand(MMOHi);
6988   MI.eraseFromParent(); // The pseudo instruction is gone now.
6989   return BB;
6990 }
6991 
6992 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6993                                                  MachineBasicBlock *BB) {
6994   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6995          "Unexpected instruction");
6996 
6997   MachineFunction &MF = *BB->getParent();
6998   DebugLoc DL = MI.getDebugLoc();
6999   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7000   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7001   Register DstReg = MI.getOperand(0).getReg();
7002   Register LoReg = MI.getOperand(1).getReg();
7003   Register HiReg = MI.getOperand(2).getReg();
7004   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
7005   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7006 
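  // Build the f64 by storing the two 32-bit halves to a stack slot with two
  // SW stores and reloading the pair into an FPR64 register.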
7007   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7008   MachineMemOperand *MMOLo =
7009       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
7010   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7011       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
7012   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7013       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
7014       .addFrameIndex(FI)
7015       .addImm(0)
7016       .addMemOperand(MMOLo);
7017   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7018       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
7019       .addFrameIndex(FI)
7020       .addImm(4)
7021       .addMemOperand(MMOHi);
7022   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
7023   MI.eraseFromParent(); // The pseudo instruction is gone now.
7024   return BB;
7025 }
7026 
7027 static bool isSelectPseudo(MachineInstr &MI) {
7028   switch (MI.getOpcode()) {
7029   default:
7030     return false;
7031   case RISCV::Select_GPR_Using_CC_GPR:
7032   case RISCV::Select_FPR16_Using_CC_GPR:
7033   case RISCV::Select_FPR32_Using_CC_GPR:
7034   case RISCV::Select_FPR64_Using_CC_GPR:
7035     return true;
7036   }
7037 }
7038 
7039 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
7040                                            MachineBasicBlock *BB,
7041                                            const RISCVSubtarget &Subtarget) {
7042   // To "insert" Select_* instructions, we actually have to insert the triangle
7043   // control-flow pattern.  The incoming instructions know the destination vreg
7044   // to set, the condition code register to branch on, the true/false values to
7045   // select between, and the condcode to use to select the appropriate branch.
7046   //
7047   // We produce the following control flow:
7048   //     HeadMBB
7049   //     |  \
7050   //     |  IfFalseMBB
7051   //     | /
7052   //    TailMBB
7053   //
7054   // When we find a sequence of selects we attempt to optimize their emission
7055   // by sharing the control flow. Currently we only handle cases where we have
7056   // multiple selects with the exact same condition (same LHS, RHS and CC).
7057   // The selects may be interleaved with other instructions if the other
7058   // instructions meet some requirements we deem safe:
7059   // - They are debug instructions. Otherwise,
7060   // - They do not have side-effects, do not access memory and their inputs do
7061   //   not depend on the results of the select pseudo-instructions.
7062   // The TrueV/FalseV operands of the selects cannot depend on the result of
7063   // previous selects in the sequence.
7064   // These conditions could be further relaxed. See the X86 target for a
7065   // related approach and more information.
7066   Register LHS = MI.getOperand(1).getReg();
7067   Register RHS = MI.getOperand(2).getReg();
7068   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
7069 
7070   SmallVector<MachineInstr *, 4> SelectDebugValues;
7071   SmallSet<Register, 4> SelectDests;
7072   SelectDests.insert(MI.getOperand(0).getReg());
7073 
7074   MachineInstr *LastSelectPseudo = &MI;
7075 
7076   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
7077        SequenceMBBI != E; ++SequenceMBBI) {
7078     if (SequenceMBBI->isDebugInstr())
7079       continue;
7080     else if (isSelectPseudo(*SequenceMBBI)) {
7081       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
7082           SequenceMBBI->getOperand(2).getReg() != RHS ||
7083           SequenceMBBI->getOperand(3).getImm() != CC ||
7084           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
7085           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
7086         break;
7087       LastSelectPseudo = &*SequenceMBBI;
7088       SequenceMBBI->collectDebugValues(SelectDebugValues);
7089       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
7090     } else {
7091       if (SequenceMBBI->hasUnmodeledSideEffects() ||
7092           SequenceMBBI->mayLoadOrStore())
7093         break;
7094       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
7095             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
7096           }))
7097         break;
7098     }
7099   }
7100 
7101   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
7102   const BasicBlock *LLVM_BB = BB->getBasicBlock();
7103   DebugLoc DL = MI.getDebugLoc();
7104   MachineFunction::iterator I = ++BB->getIterator();
7105 
7106   MachineBasicBlock *HeadMBB = BB;
7107   MachineFunction *F = BB->getParent();
7108   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
7109   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
7110 
7111   F->insert(I, IfFalseMBB);
7112   F->insert(I, TailMBB);
7113 
7114   // Transfer debug instructions associated with the selects to TailMBB.
7115   for (MachineInstr *DebugInstr : SelectDebugValues) {
7116     TailMBB->push_back(DebugInstr->removeFromParent());
7117   }
7118 
7119   // Move all instructions after the sequence to TailMBB.
7120   TailMBB->splice(TailMBB->end(), HeadMBB,
7121                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
7122   // Update machine-CFG edges by transferring all successors of the current
7123   // block to the new block which will contain the Phi nodes for the selects.
7124   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
7125   // Set the successors for HeadMBB.
7126   HeadMBB->addSuccessor(IfFalseMBB);
7127   HeadMBB->addSuccessor(TailMBB);
7128 
7129   // Insert appropriate branch.
7130   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
7131     .addReg(LHS)
7132     .addReg(RHS)
7133     .addMBB(TailMBB);
7134 
7135   // IfFalseMBB just falls through to TailMBB.
7136   IfFalseMBB->addSuccessor(TailMBB);
7137 
7138   // Create PHIs for all of the select pseudo-instructions.
7139   auto SelectMBBI = MI.getIterator();
7140   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7141   auto InsertionPoint = TailMBB->begin();
7142   while (SelectMBBI != SelectEnd) {
7143     auto Next = std::next(SelectMBBI);
7144     if (isSelectPseudo(*SelectMBBI)) {
7145       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
7146       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
7147               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
7148           .addReg(SelectMBBI->getOperand(4).getReg())
7149           .addMBB(HeadMBB)
7150           .addReg(SelectMBBI->getOperand(5).getReg())
7151           .addMBB(IfFalseMBB);
7152       SelectMBBI->eraseFromParent();
7153     }
7154     SelectMBBI = Next;
7155   }
7156 
7157   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7158   return TailMBB;
7159 }
7160 
7161 MachineBasicBlock *
7162 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
7163                                                  MachineBasicBlock *BB) const {
7164   switch (MI.getOpcode()) {
7165   default:
7166     llvm_unreachable("Unexpected instr type to insert");
7167   case RISCV::ReadCycleWide:
7168     assert(!Subtarget.is64Bit() &&
7169            "ReadCycleWide is only to be used on riscv32");
7170     return emitReadCycleWidePseudo(MI, BB);
7171   case RISCV::Select_GPR_Using_CC_GPR:
7172   case RISCV::Select_FPR16_Using_CC_GPR:
7173   case RISCV::Select_FPR32_Using_CC_GPR:
7174   case RISCV::Select_FPR64_Using_CC_GPR:
7175     return emitSelectPseudo(MI, BB, Subtarget);
7176   case RISCV::BuildPairF64Pseudo:
7177     return emitBuildPairF64Pseudo(MI, BB);
7178   case RISCV::SplitF64Pseudo:
7179     return emitSplitF64Pseudo(MI, BB);
7180   }
7181 }
7182 
7183 // Calling Convention Implementation.
7184 // The expectations for frontend ABI lowering vary from target to target.
7185 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
7186 // details, but this is a longer term goal. For now, we simply try to keep the
7187 // role of the frontend as simple and well-defined as possible. The rules can
7188 // be summarised as:
7189 // * Never split up large scalar arguments. We handle them here.
7190 // * If a hardfloat calling convention is being used, and the struct may be
7191 // passed in a pair of registers (fp+fp, int+fp), and both registers are
7192 // available, then pass as two separate arguments. If either the GPRs or FPRs
7193 // are exhausted, then pass according to the rule below.
7194 // * If a struct could never be passed in registers or directly in a stack
7195 // slot (as it is larger than 2*XLEN and the floating point rules don't
7196 // apply), then pass it using a pointer with the byval attribute.
7197 // * If a struct is less than 2*XLEN, then coerce to either a two-element
7198 // word-sized array or a 2*XLEN scalar (depending on alignment).
7199 // * The frontend can determine whether a struct is returned by reference or
7200 // not based on its size and fields. If it will be returned by reference, the
7201 // frontend must modify the prototype so a pointer with the sret annotation is
7202 // passed as the first argument. This is not necessary for large scalar
7203 // returns.
7204 // * Struct return values and varargs should be coerced to structs containing
7205 // register-size fields in the same situations they would be for fixed
7206 // arguments.
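//
// For example (illustrative only, assuming an RV32 hard-float ABI with
// argument registers still available): struct { float f; int i; } may be
// passed as two separate arguments (one in an FPR, one in a GPR), while a
// struct larger than 2*XLEN that the floating point rules don't cover is
// passed by a pointer with the byval attribute.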
7207 
7208 static const MCPhysReg ArgGPRs[] = {
7209   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
7210   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
7211 };
7212 static const MCPhysReg ArgFPR16s[] = {
7213   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
7214   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
7215 };
7216 static const MCPhysReg ArgFPR32s[] = {
7217   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
7218   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
7219 };
7220 static const MCPhysReg ArgFPR64s[] = {
7221   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
7222   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
7223 };
7224 // This is an interim calling convention and it may be changed in the future.
7225 static const MCPhysReg ArgVRs[] = {
7226     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
7227     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
7228     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
7229 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
7230                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
7231                                      RISCV::V20M2, RISCV::V22M2};
7232 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
7233                                      RISCV::V20M4};
7234 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
7235 
7236 // Pass a 2*XLEN argument that has been split into two XLEN values through
7237 // registers or the stack as necessary.
7238 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
7239                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
7240                                 MVT ValVT2, MVT LocVT2,
7241                                 ISD::ArgFlagsTy ArgFlags2) {
7242   unsigned XLenInBytes = XLen / 8;
7243   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7244     // At least one half can be passed via register.
7245     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
7246                                      VA1.getLocVT(), CCValAssign::Full));
7247   } else {
7248     // Both halves must be passed on the stack, with proper alignment.
7249     Align StackAlign =
7250         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7251     State.addLoc(
7252         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
7253                             State.AllocateStack(XLenInBytes, StackAlign),
7254                             VA1.getLocVT(), CCValAssign::Full));
7255     State.addLoc(CCValAssign::getMem(
7256         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7257         LocVT2, CCValAssign::Full));
7258     return false;
7259   }
7260 
7261   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7262     // The second half can also be passed via register.
7263     State.addLoc(
7264         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
7265   } else {
7266     // The second half is passed via the stack, without additional alignment.
7267     State.addLoc(CCValAssign::getMem(
7268         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7269         LocVT2, CCValAssign::Full));
7270   }
7271 
7272   return false;
7273 }
7274 
7275 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7276                                Optional<unsigned> FirstMaskArgument,
7277                                CCState &State, const RISCVTargetLowering &TLI) {
7278   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7279   if (RC == &RISCV::VRRegClass) {
7280     // Assign the first mask argument to V0.
7281     // This is an interim calling convention and it may be changed in the
7282     // future.
7283     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7284       return State.AllocateReg(RISCV::V0);
7285     return State.AllocateReg(ArgVRs);
7286   }
7287   if (RC == &RISCV::VRM2RegClass)
7288     return State.AllocateReg(ArgVRM2s);
7289   if (RC == &RISCV::VRM4RegClass)
7290     return State.AllocateReg(ArgVRM4s);
7291   if (RC == &RISCV::VRM8RegClass)
7292     return State.AllocateReg(ArgVRM8s);
7293   llvm_unreachable("Unhandled register class for ValueType");
7294 }
7295 
7296 // Implements the RISC-V calling convention. Returns true upon failure.
7297 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7298                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7299                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7300                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7301                      Optional<unsigned> FirstMaskArgument) {
7302   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7303   assert(XLen == 32 || XLen == 64);
7304   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7305 
7306   // Any return value split into more than two values can't be returned
7307   // directly. Vectors are returned via the available vector registers.
7308   if (!LocVT.isVector() && IsRet && ValNo > 1)
7309     return true;
7310 
7311   // UseGPRForF16_F32 is true if targeting a soft-float ABI, if passing a
7312   // variadic argument, or if no F16/F32 argument registers are available.
7313   bool UseGPRForF16_F32 = true;
7314   // UseGPRForF64 is true if targeting a soft-float or FLEN=32 ABI, if passing
7315   // a variadic argument, or if no F64 argument registers are available.
7316   bool UseGPRForF64 = true;
7317 
7318   switch (ABI) {
7319   default:
7320     llvm_unreachable("Unexpected ABI");
7321   case RISCVABI::ABI_ILP32:
7322   case RISCVABI::ABI_LP64:
7323     break;
7324   case RISCVABI::ABI_ILP32F:
7325   case RISCVABI::ABI_LP64F:
7326     UseGPRForF16_F32 = !IsFixed;
7327     break;
7328   case RISCVABI::ABI_ILP32D:
7329   case RISCVABI::ABI_LP64D:
7330     UseGPRForF16_F32 = !IsFixed;
7331     UseGPRForF64 = !IsFixed;
7332     break;
7333   }
7334 
7335   // FPR16, FPR32, and FPR64 alias each other.
7336   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7337     UseGPRForF16_F32 = true;
7338     UseGPRForF64 = true;
7339   }
7340 
7341   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7342   // similar local variables rather than directly checking against the target
7343   // ABI.
7344 
7345   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7346     LocVT = XLenVT;
7347     LocInfo = CCValAssign::BCvt;
7348   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7349     LocVT = MVT::i64;
7350     LocInfo = CCValAssign::BCvt;
7351   }
7352 
7353   // If this is a variadic argument, the RISC-V calling convention requires
7354   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7355   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7356   // be used regardless of whether the original argument was split during
7357   // legalisation or not. The argument will not be passed by registers if the
7358   // original type is larger than 2*XLEN, so the register alignment rule does
7359   // not apply.
7360   unsigned TwoXLenInBytes = (2 * XLen) / 8;
7361   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7362       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7363     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7364     // Skip 'odd' register if necessary.
7365     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7366       State.AllocateReg(ArgGPRs);
7367   }
7368 
7369   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7370   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7371       State.getPendingArgFlags();
7372 
7373   assert(PendingLocs.size() == PendingArgFlags.size() &&
7374          "PendingLocs and PendingArgFlags out of sync");
7375 
7376   // Handle passing f64 on RV32D with a soft float ABI or when floating point
7377   // registers are exhausted.
7378   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7379     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7380            "Can't lower f64 if it is split");
7381     // Depending on available argument GPRs, f64 may be passed in a pair of
7382     // GPRs, split between a GPR and the stack, or passed completely on the
7383     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7384     // cases.
7385     Register Reg = State.AllocateReg(ArgGPRs);
7386     LocVT = MVT::i32;
7387     if (!Reg) {
7388       unsigned StackOffset = State.AllocateStack(8, Align(8));
7389       State.addLoc(
7390           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7391       return false;
7392     }
7393     if (!State.AllocateReg(ArgGPRs))
7394       State.AllocateStack(4, Align(4));
7395     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7396     return false;
7397   }
7398 
7399   // Fixed-length vectors are located in the corresponding scalable-vector
7400   // container types.
7401   if (ValVT.isFixedLengthVector())
7402     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7403 
7404   // Split arguments might be passed indirectly, so keep track of the pending
7405   // values. Split vectors are passed via a mix of registers and indirectly, so
7406   // treat them as we would any other argument.
7407   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7408     LocVT = XLenVT;
7409     LocInfo = CCValAssign::Indirect;
7410     PendingLocs.push_back(
7411         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7412     PendingArgFlags.push_back(ArgFlags);
7413     if (!ArgFlags.isSplitEnd()) {
7414       return false;
7415     }
7416   }
7417 
7418   // If the split argument only had two elements, it should be passed directly
7419   // in registers or on the stack.
7420   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
7421       PendingLocs.size() <= 2) {
7422     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7423     // Apply the normal calling convention rules to the first half of the
7424     // split argument.
7425     CCValAssign VA = PendingLocs[0];
7426     ISD::ArgFlagsTy AF = PendingArgFlags[0];
7427     PendingLocs.clear();
7428     PendingArgFlags.clear();
7429     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7430                                ArgFlags);
7431   }
7432 
7433   // Allocate to a register if possible, or else a stack slot.
7434   Register Reg;
7435   unsigned StoreSizeBytes = XLen / 8;
7436   Align StackAlign = Align(XLen / 8);
7437 
7438   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
7439     Reg = State.AllocateReg(ArgFPR16s);
7440   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
7441     Reg = State.AllocateReg(ArgFPR32s);
7442   else if (ValVT == MVT::f64 && !UseGPRForF64)
7443     Reg = State.AllocateReg(ArgFPR64s);
7444   else if (ValVT.isVector()) {
7445     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
7446     if (!Reg) {
7447       // For return values, the vector must be passed fully via registers or
7448       // via the stack.
7449       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
7450       // but we're using all of them.
7451       if (IsRet)
7452         return true;
7453       // Try using a GPR to pass the address
7454       if ((Reg = State.AllocateReg(ArgGPRs))) {
7455         LocVT = XLenVT;
7456         LocInfo = CCValAssign::Indirect;
7457       } else if (ValVT.isScalableVector()) {
7458         report_fatal_error("Unable to pass scalable vector types on the stack");
7459       } else {
7460         // Pass fixed-length vectors on the stack.
7461         LocVT = ValVT;
7462         StoreSizeBytes = ValVT.getStoreSize();
7463         // Align vectors to their element sizes, being careful for vXi1
7464         // vectors.
7465         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7466       }
7467     }
7468   } else {
7469     Reg = State.AllocateReg(ArgGPRs);
7470   }
7471 
7472   unsigned StackOffset =
7473       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7474 
7475   // If we reach this point and PendingLocs is non-empty, we must be at the
7476   // end of a split argument that must be passed indirectly.
7477   if (!PendingLocs.empty()) {
7478     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7479     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7480 
7481     for (auto &It : PendingLocs) {
7482       if (Reg)
7483         It.convertToReg(Reg);
7484       else
7485         It.convertToMem(StackOffset);
7486       State.addLoc(It);
7487     }
7488     PendingLocs.clear();
7489     PendingArgFlags.clear();
7490     return false;
7491   }
7492 
7493   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7494           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7495          "Expected an XLenVT or vector types at this stage");
7496 
7497   if (Reg) {
7498     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7499     return false;
7500   }
7501 
7502   // When a floating-point value is passed on the stack, no bit-conversion is
7503   // needed.
7504   if (ValVT.isFloatingPoint()) {
7505     LocVT = ValVT;
7506     LocInfo = CCValAssign::Full;
7507   }
7508   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7509   return false;
7510 }
7511 
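// Return the index of the first vXi1 (mask) vector argument, if any, so that
// allocateRVVReg can give the mask argument special treatment.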
7512 template <typename ArgTy>
7513 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
7514   for (const auto &ArgIdx : enumerate(Args)) {
7515     MVT ArgVT = ArgIdx.value().VT;
7516     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7517       return ArgIdx.index();
7518   }
7519   return None;
7520 }
7521 
7522 void RISCVTargetLowering::analyzeInputArgs(
7523     MachineFunction &MF, CCState &CCInfo,
7524     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7525     RISCVCCAssignFn Fn) const {
7526   unsigned NumArgs = Ins.size();
7527   FunctionType *FType = MF.getFunction().getFunctionType();
7528 
7529   Optional<unsigned> FirstMaskArgument;
7530   if (Subtarget.hasStdExtV())
7531     FirstMaskArgument = preAssignMask(Ins);
7532 
7533   for (unsigned i = 0; i != NumArgs; ++i) {
7534     MVT ArgVT = Ins[i].VT;
7535     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7536 
7537     Type *ArgTy = nullptr;
7538     if (IsRet)
7539       ArgTy = FType->getReturnType();
7540     else if (Ins[i].isOrigArg())
7541       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7542 
7543     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7544     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7545            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7546            FirstMaskArgument)) {
7547       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7548                         << EVT(ArgVT).getEVTString() << '\n');
7549       llvm_unreachable(nullptr);
7550     }
7551   }
7552 }
7553 
7554 void RISCVTargetLowering::analyzeOutputArgs(
7555     MachineFunction &MF, CCState &CCInfo,
7556     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7557     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7558   unsigned NumArgs = Outs.size();
7559 
7560   Optional<unsigned> FirstMaskArgument;
7561   if (Subtarget.hasStdExtV())
7562     FirstMaskArgument = preAssignMask(Outs);
7563 
7564   for (unsigned i = 0; i != NumArgs; i++) {
7565     MVT ArgVT = Outs[i].VT;
7566     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7567     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7568 
7569     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7570     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7571            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7572            FirstMaskArgument)) {
7573       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7574                         << EVT(ArgVT).getEVTString() << "\n");
7575       llvm_unreachable(nullptr);
7576     }
7577   }
7578 }
7579 
7580 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7581 // values.
7582 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7583                                    const CCValAssign &VA, const SDLoc &DL,
7584                                    const RISCVSubtarget &Subtarget) {
7585   switch (VA.getLocInfo()) {
7586   default:
7587     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7588   case CCValAssign::Full:
7589     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7590       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7591     break;
7592   case CCValAssign::BCvt:
7593     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7594       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7595     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7596       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7597     else
7598       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7599     break;
7600   }
7601   return Val;
7602 }
7603 
7604 // The caller is responsible for loading the full value if the argument is
7605 // passed with CCValAssign::Indirect.
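// In that case the copied-in register only holds a pointer to the argument;
// LowerFormalArguments performs the actual load(s) from that address.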
7606 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7607                                 const CCValAssign &VA, const SDLoc &DL,
7608                                 const RISCVTargetLowering &TLI) {
7609   MachineFunction &MF = DAG.getMachineFunction();
7610   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7611   EVT LocVT = VA.getLocVT();
7612   SDValue Val;
7613   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7614   Register VReg = RegInfo.createVirtualRegister(RC);
7615   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7616   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7617 
7618   if (VA.getLocInfo() == CCValAssign::Indirect)
7619     return Val;
7620 
7621   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7622 }
7623 
7624 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7625                                    const CCValAssign &VA, const SDLoc &DL,
7626                                    const RISCVSubtarget &Subtarget) {
7627   EVT LocVT = VA.getLocVT();
7628 
7629   switch (VA.getLocInfo()) {
7630   default:
7631     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7632   case CCValAssign::Full:
7633     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7634       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7635     break;
7636   case CCValAssign::BCvt:
7637     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7638       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7639     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7640       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7641     else
7642       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7643     break;
7644   }
7645   return Val;
7646 }
7647 
7648 // The caller is responsible for loading the full value if the argument is
7649 // passed with CCValAssign::Indirect.
7650 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7651                                 const CCValAssign &VA, const SDLoc &DL) {
7652   MachineFunction &MF = DAG.getMachineFunction();
7653   MachineFrameInfo &MFI = MF.getFrameInfo();
7654   EVT LocVT = VA.getLocVT();
7655   EVT ValVT = VA.getValVT();
7656   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7657   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7658                                  /*Immutable=*/true);
7659   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7660   SDValue Val;
7661 
7662   ISD::LoadExtType ExtType;
7663   switch (VA.getLocInfo()) {
7664   default:
7665     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7666   case CCValAssign::Full:
7667   case CCValAssign::Indirect:
7668   case CCValAssign::BCvt:
7669     ExtType = ISD::NON_EXTLOAD;
7670     break;
7671   }
7672   Val = DAG.getExtLoad(
7673       ExtType, DL, LocVT, Chain, FIN,
7674       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7675   return Val;
7676 }
7677 
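// Unpack an f64 argument passed on RV32 with a soft-float ABI: it arrives
// either in a pair of GPRs, split between X17 (a7) and the first stack slot,
// or entirely on the stack.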
7678 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7679                                        const CCValAssign &VA, const SDLoc &DL) {
7680   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7681          "Unexpected VA");
7682   MachineFunction &MF = DAG.getMachineFunction();
7683   MachineFrameInfo &MFI = MF.getFrameInfo();
7684   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7685 
7686   if (VA.isMemLoc()) {
7687     // f64 is passed on the stack.
7688     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7689     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7690     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7691                        MachinePointerInfo::getFixedStack(MF, FI));
7692   }
7693 
7694   assert(VA.isRegLoc() && "Expected register VA assignment");
7695 
7696   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7697   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7698   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7699   SDValue Hi;
7700   if (VA.getLocReg() == RISCV::X17) {
7701     // Second half of f64 is passed on the stack.
7702     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7703     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7704     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7705                      MachinePointerInfo::getFixedStack(MF, FI));
7706   } else {
7707     // Second half of f64 is passed in another GPR.
7708     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7709     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7710     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7711   }
7712   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7713 }
7714 
// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit other cases.
7717 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7718                             unsigned ValNo, MVT ValVT, MVT LocVT,
7719                             CCValAssign::LocInfo LocInfo,
7720                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7721                             bool IsFixed, bool IsRet, Type *OrigTy,
7722                             const RISCVTargetLowering &TLI,
7723                             Optional<unsigned> FirstMaskArgument) {
7724 
7725   // X5 and X6 might be used for save-restore libcall.
7726   static const MCPhysReg GPRList[] = {
7727       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7728       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7729       RISCV::X29, RISCV::X30, RISCV::X31};
7730 
7731   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7732     if (unsigned Reg = State.AllocateReg(GPRList)) {
7733       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7734       return false;
7735     }
7736   }
7737 
7738   if (LocVT == MVT::f16) {
7739     static const MCPhysReg FPR16List[] = {
7740         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7741         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7742         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7743         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7744     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7745       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7746       return false;
7747     }
7748   }
7749 
7750   if (LocVT == MVT::f32) {
7751     static const MCPhysReg FPR32List[] = {
7752         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7753         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7754         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7755         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7756     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7757       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7758       return false;
7759     }
7760   }
7761 
7762   if (LocVT == MVT::f64) {
7763     static const MCPhysReg FPR64List[] = {
7764         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7765         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7766         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7767         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7768     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7769       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7770       return false;
7771     }
7772   }
7773 
7774   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7775     unsigned Offset4 = State.AllocateStack(4, Align(4));
7776     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7777     return false;
7778   }
7779 
7780   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7781     unsigned Offset5 = State.AllocateStack(8, Align(8));
7782     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7783     return false;
7784   }
7785 
7786   if (LocVT.isVector()) {
7787     if (unsigned Reg =
7788             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7789       // Fixed-length vectors are located in the corresponding scalable-vector
7790       // container types.
7791       if (ValVT.isFixedLengthVector())
7792         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7793       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7794     } else {
7795       // Try and pass the address via a "fast" GPR.
7796       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7797         LocInfo = CCValAssign::Indirect;
7798         LocVT = TLI.getSubtarget().getXLenVT();
7799         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7800       } else if (ValVT.isFixedLengthVector()) {
7801         auto StackAlign =
7802             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7803         unsigned StackOffset =
7804             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7805         State.addLoc(
7806             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7807       } else {
7808         // Can't pass scalable vectors on the stack.
7809         return true;
7810       }
7811     }
7812 
7813     return false;
7814   }
7815 
7816   return true; // CC didn't match.
7817 }
7818 
7819 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7820                          CCValAssign::LocInfo LocInfo,
7821                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7822 
7823   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7824     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7825     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7826     static const MCPhysReg GPRList[] = {
7827         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7828         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7829     if (unsigned Reg = State.AllocateReg(GPRList)) {
7830       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7831       return false;
7832     }
7833   }
7834 
7835   if (LocVT == MVT::f32) {
7836     // Pass in STG registers: F1, ..., F6
7837     //                        fs0 ... fs5
7838     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7839                                           RISCV::F18_F, RISCV::F19_F,
7840                                           RISCV::F20_F, RISCV::F21_F};
7841     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7842       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7843       return false;
7844     }
7845   }
7846 
7847   if (LocVT == MVT::f64) {
7848     // Pass in STG registers: D1, ..., D6
7849     //                        fs6 ... fs11
7850     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7851                                           RISCV::F24_D, RISCV::F25_D,
7852                                           RISCV::F26_D, RISCV::F27_D};
7853     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7854       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7855       return false;
7856     }
7857   }
7858 
7859   report_fatal_error("No registers left in GHC calling convention");
7860   return true;
7861 }
7862 
7863 // Transform physical registers into virtual registers.
7864 SDValue RISCVTargetLowering::LowerFormalArguments(
7865     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7866     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7867     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7868 
7869   MachineFunction &MF = DAG.getMachineFunction();
7870 
7871   switch (CallConv) {
7872   default:
7873     report_fatal_error("Unsupported calling convention");
7874   case CallingConv::C:
7875   case CallingConv::Fast:
7876     break;
7877   case CallingConv::GHC:
7878     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7879         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7880       report_fatal_error(
7881         "GHC calling convention requires the F and D instruction set extensions");
7882   }
7883 
7884   const Function &Func = MF.getFunction();
7885   if (Func.hasFnAttribute("interrupt")) {
7886     if (!Func.arg_empty())
7887       report_fatal_error(
7888         "Functions with the interrupt attribute cannot have arguments!");
7889 
7890     StringRef Kind =
7891       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7892 
7893     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7894       report_fatal_error(
7895         "Function interrupt attribute argument not supported!");
7896   }
7897 
7898   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7899   MVT XLenVT = Subtarget.getXLenVT();
7900   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7902   std::vector<SDValue> OutChains;
7903 
7904   // Assign locations to all of the incoming arguments.
7905   SmallVector<CCValAssign, 16> ArgLocs;
7906   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7907 
7908   if (CallConv == CallingConv::GHC)
7909     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7910   else
7911     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7912                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7913                                                    : CC_RISCV);
7914 
7915   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7916     CCValAssign &VA = ArgLocs[i];
7917     SDValue ArgValue;
7918     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7919     // case.
7920     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7921       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7922     else if (VA.isRegLoc())
7923       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7924     else
7925       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7926 
7927     if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address). Vectors may be partly split to registers and partly to the
      // stack, in which case the base address is partly offset and subsequent
      // loads are relative to that.
7933       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7934                                    MachinePointerInfo()));
7935       unsigned ArgIndex = Ins[i].OrigArgIndex;
7936       unsigned ArgPartOffset = Ins[i].PartOffset;
7937       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7938       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7939         CCValAssign &PartVA = ArgLocs[i + 1];
7940         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7941         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7942         if (PartVA.getValVT().isScalableVector())
7943           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7944         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7945         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7946                                      MachinePointerInfo()));
7947         ++i;
7948       }
7949       continue;
7950     }
7951     InVals.push_back(ArgValue);
7952   }
7953 
7954   if (IsVarArg) {
7955     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7956     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7957     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7958     MachineFrameInfo &MFI = MF.getFrameInfo();
7959     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7960     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7961 
7962     // Offset of the first variable argument from stack pointer, and size of
7963     // the vararg save area. For now, the varargs save area is either zero or
7964     // large enough to hold a0-a7.
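    // For example, if fixed arguments occupy a0-a2, then a3-a7 are saved
    // (5 * XLenInBytes) and VaArgOffset is set to -5 * XLenInBytes.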
7965     int VaArgOffset, VarArgsSaveSize;
7966 
7967     // If all registers are allocated, then all varargs must be passed on the
7968     // stack and we don't need to save any argregs.
7969     if (ArgRegs.size() == Idx) {
7970       VaArgOffset = CCInfo.getNextStackOffset();
7971       VarArgsSaveSize = 0;
7972     } else {
7973       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7974       VaArgOffset = -VarArgsSaveSize;
7975     }
7976 
    // Record the frame index of the first variable argument,
    // which is needed by VASTART.
7979     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7980     RVFI->setVarArgsFrameIndex(FI);
7981 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
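    // For example, if only a7 needs saving (Idx == 7), an extra XLEN-sized
    // slot is allocated below it, growing the save area to 2*XLEN bytes.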
7985     if (Idx % 2) {
7986       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7987       VarArgsSaveSize += XLenInBytes;
7988     }
7989 
7990     // Copy the integer registers that may have been used for passing varargs
7991     // to the vararg save area.
7992     for (unsigned I = Idx; I < ArgRegs.size();
7993          ++I, VaArgOffset += XLenInBytes) {
7994       const Register Reg = RegInfo.createVirtualRegister(RC);
7995       RegInfo.addLiveIn(ArgRegs[I], Reg);
7996       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7997       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7998       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7999       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
8000                                    MachinePointerInfo::getFixedStack(MF, FI));
8001       cast<StoreSDNode>(Store.getNode())
8002           ->getMemOperand()
8003           ->setValue((Value *)nullptr);
8004       OutChains.push_back(Store);
8005     }
8006     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
8007   }
8008 
  // All stores are grouped into one node so that the sizes of Ins and InVals
  // match. This only happens for vararg functions.
8011   if (!OutChains.empty()) {
8012     OutChains.push_back(Chain);
8013     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
8014   }
8015 
8016   return Chain;
8017 }
8018 
8019 /// isEligibleForTailCallOptimization - Check whether the call is eligible
8020 /// for tail call optimization.
8021 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
8022 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
8023     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
8024     const SmallVector<CCValAssign, 16> &ArgLocs) const {
8025 
8026   auto &Callee = CLI.Callee;
8027   auto CalleeCC = CLI.CallConv;
8028   auto &Outs = CLI.Outs;
8029   auto &Caller = MF.getFunction();
8030   auto CallerCC = Caller.getCallingConv();
8031 
8032   // Exception-handling functions need a special set of instructions to
8033   // indicate a return to the hardware. Tail-calling another function would
8034   // probably break this.
8035   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
8036   // should be expanded as new function attributes are introduced.
8037   if (Caller.hasFnAttribute("interrupt"))
8038     return false;
8039 
8040   // Do not tail call opt if the stack is used to pass parameters.
8041   if (CCInfo.getNextStackOffset() != 0)
8042     return false;
8043 
8044   // Do not tail call opt if any parameters need to be passed indirectly.
8045   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
8046   // passed indirectly. So the address of the value will be passed in a
  // register, or if not available, then the address is put on the stack.
  // Passing indirectly often requires allocating stack space to hold the
  // value, so the CCInfo.getNextStackOffset() != 0 check alone is not enough;
  // we must also check whether any CCValAssign in ArgLocs is marked
  // CCValAssign::Indirect.
8052   for (auto &VA : ArgLocs)
8053     if (VA.getLocInfo() == CCValAssign::Indirect)
8054       return false;
8055 
8056   // Do not tail call opt if either caller or callee uses struct return
8057   // semantics.
8058   auto IsCallerStructRet = Caller.hasStructRetAttr();
8059   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
8060   if (IsCallerStructRet || IsCalleeStructRet)
8061     return false;
8062 
8063   // Externally-defined functions with weak linkage should not be
8064   // tail-called. The behaviour of branch instructions in this situation (as
8065   // used for tail calls) is implementation-defined, so we cannot rely on the
8066   // linker replacing the tail call with a return.
8067   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
8068     const GlobalValue *GV = G->getGlobal();
8069     if (GV->hasExternalWeakLinkage())
8070       return false;
8071   }
8072 
8073   // The callee has to preserve all registers the caller needs to preserve.
8074   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
8075   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
8076   if (CalleeCC != CallerCC) {
8077     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
8078     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8079       return false;
8080   }
8081 
8082   // Byval parameters hand the function a pointer directly into the stack area
8083   // we want to reuse during a tail call. Working around this *is* possible
8084   // but less efficient and uglier in LowerCall.
8085   for (auto &Arg : Outs)
8086     if (Arg.Flags.isByVal())
8087       return false;
8088 
8089   return true;
8090 }
8091 
8092 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
8093   return DAG.getDataLayout().getPrefTypeAlign(
8094       VT.getTypeForEVT(*DAG.getContext()));
8095 }
8096 
8097 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
8098 // and output parameter nodes.
8099 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
8100                                        SmallVectorImpl<SDValue> &InVals) const {
8101   SelectionDAG &DAG = CLI.DAG;
8102   SDLoc &DL = CLI.DL;
8103   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
8104   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
8105   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
8106   SDValue Chain = CLI.Chain;
8107   SDValue Callee = CLI.Callee;
8108   bool &IsTailCall = CLI.IsTailCall;
8109   CallingConv::ID CallConv = CLI.CallConv;
8110   bool IsVarArg = CLI.IsVarArg;
8111   EVT PtrVT = getPointerTy(DAG.getDataLayout());
8112   MVT XLenVT = Subtarget.getXLenVT();
8113 
8114   MachineFunction &MF = DAG.getMachineFunction();
8115 
8116   // Analyze the operands of the call, assigning locations to each operand.
8117   SmallVector<CCValAssign, 16> ArgLocs;
8118   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8119 
8120   if (CallConv == CallingConv::GHC)
8121     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
8122   else
8123     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
8124                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8125                                                     : CC_RISCV);
8126 
8127   // Check if it's really possible to do a tail call.
8128   if (IsTailCall)
8129     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8130 
8131   if (IsTailCall)
8132     ++NumTailCalls;
8133   else if (CLI.CB && CLI.CB->isMustTailCall())
8134     report_fatal_error("failed to perform tail call elimination on a call "
8135                        "site marked musttail");
8136 
8137   // Get a count of how many bytes are to be pushed on the stack.
8138   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
8139 
8140   // Create local copies for byval args
8141   SmallVector<SDValue, 8> ByValArgs;
8142   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8143     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8144     if (!Flags.isByVal())
8145       continue;
8146 
8147     SDValue Arg = OutVals[i];
8148     unsigned Size = Flags.getByValSize();
8149     Align Alignment = Flags.getNonZeroByValAlign();
8150 
8151     int FI =
8152         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
8153     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8154     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
8155 
8156     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
8157                           /*IsVolatile=*/false,
8158                           /*AlwaysInline=*/false, IsTailCall,
8159                           MachinePointerInfo(), MachinePointerInfo());
8160     ByValArgs.push_back(FIPtr);
8161   }
8162 
8163   if (!IsTailCall)
8164     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
8165 
8166   // Copy argument values to their designated locations.
8167   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
8168   SmallVector<SDValue, 8> MemOpChains;
8169   SDValue StackPtr;
8170   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
8171     CCValAssign &VA = ArgLocs[i];
8172     SDValue ArgValue = OutVals[i];
8173     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8174 
8175     // Handle passing f64 on RV32D with a soft float ABI as a special case.
8176     bool IsF64OnRV32DSoftABI =
8177         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
8178     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
8179       SDValue SplitF64 = DAG.getNode(
8180           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8181       SDValue Lo = SplitF64.getValue(0);
8182       SDValue Hi = SplitF64.getValue(1);
8183 
8184       Register RegLo = VA.getLocReg();
8185       RegsToPass.push_back(std::make_pair(RegLo, Lo));
8186 
8187       if (RegLo == RISCV::X17) {
8188         // Second half of f64 is passed on the stack.
8189         // Work out the address of the stack slot.
8190         if (!StackPtr.getNode())
8191           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8192         // Emit the store.
8193         MemOpChains.push_back(
8194             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
8195       } else {
8196         // Second half of f64 is passed in another GPR.
8197         assert(RegLo < RISCV::X31 && "Invalid register pair");
8198         Register RegHigh = RegLo + 1;
8199         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8200       }
8201       continue;
8202     }
8203 
8204     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
8205     // as any other MemLoc.
8206 
8207     // Promote the value if needed.
8208     // For now, only handle fully promoted and indirect arguments.
8209     if (VA.getLocInfo() == CCValAssign::Indirect) {
8210       // Store the argument in a stack slot and pass its address.
8211       Align StackAlign =
8212           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
8213                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
8214       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
8215       // If the original argument was split (e.g. i128), we need
8216       // to store the required parts of it here (and pass just one address).
8217       // Vectors may be partly split to registers and partly to the stack, in
8218       // which case the base address is partly offset and subsequent stores are
8219       // relative to that.
8220       unsigned ArgIndex = Outs[i].OrigArgIndex;
8221       unsigned ArgPartOffset = Outs[i].PartOffset;
8222       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. The only way to know what we're
      // actually storing is to walk the remaining parts in the loop below and
      // collect the info.
8226       SmallVector<std::pair<SDValue, SDValue>> Parts;
8227       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
8228         SDValue PartValue = OutVals[i + 1];
8229         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
8230         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8231         EVT PartVT = PartValue.getValueType();
8232         if (PartVT.isScalableVector())
8233           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8234         StoredSize += PartVT.getStoreSize();
8235         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
8236         Parts.push_back(std::make_pair(PartValue, Offset));
8237         ++i;
8238       }
8239       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
8240       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
8241       MemOpChains.push_back(
8242           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
8243                        MachinePointerInfo::getFixedStack(MF, FI)));
8244       for (const auto &Part : Parts) {
8245         SDValue PartValue = Part.first;
8246         SDValue PartOffset = Part.second;
8247         SDValue Address =
8248             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
8249         MemOpChains.push_back(
8250             DAG.getStore(Chain, DL, PartValue, Address,
8251                          MachinePointerInfo::getFixedStack(MF, FI)));
8252       }
8253       ArgValue = SpillSlot;
8254     } else {
8255       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
8256     }
8257 
8258     // Use local copy if it is a byval arg.
8259     if (Flags.isByVal())
8260       ArgValue = ByValArgs[j++];
8261 
8262     if (VA.isRegLoc()) {
8263       // Queue up the argument copies and emit them at the end.
8264       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
8265     } else {
8266       assert(VA.isMemLoc() && "Argument not register or memory");
8267       assert(!IsTailCall && "Tail call not allowed if stack is used "
8268                             "for passing parameters");
8269 
8270       // Work out the address of the stack slot.
8271       if (!StackPtr.getNode())
8272         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8273       SDValue Address =
8274           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8275                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8276 
8277       // Emit the store.
8278       MemOpChains.push_back(
8279           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8280     }
8281   }
8282 
8283   // Join the stores, which are independent of one another.
8284   if (!MemOpChains.empty())
8285     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8286 
8287   SDValue Glue;
8288 
8289   // Build a sequence of copy-to-reg nodes, chained and glued together.
8290   for (auto &Reg : RegsToPass) {
8291     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8292     Glue = Chain.getValue(1);
8293   }
8294 
  // Validate that none of the argument registers have been marked as
  // reserved; if so, report an error. Do the same for the return address if
  // this is not a tail call.
8298   validateCCReservedRegs(RegsToPass, MF);
8299   if (!IsTailCall &&
8300       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8301     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8302         MF.getFunction(),
8303         "Return address register required, but has been reserved."});
8304 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it and so the direct call can be matched by PseudoCALL.
8308   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8309     const GlobalValue *GV = S->getGlobal();
8310 
8311     unsigned OpFlags = RISCVII::MO_CALL;
8312     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8313       OpFlags = RISCVII::MO_PLT;
8314 
8315     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8316   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8317     unsigned OpFlags = RISCVII::MO_CALL;
8318 
8319     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8320                                                  nullptr))
8321       OpFlags = RISCVII::MO_PLT;
8322 
8323     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8324   }
8325 
8326   // The first call operand is the chain and the second is the target address.
8327   SmallVector<SDValue, 8> Ops;
8328   Ops.push_back(Chain);
8329   Ops.push_back(Callee);
8330 
8331   // Add argument registers to the end of the list so that they are
8332   // known live into the call.
8333   for (auto &Reg : RegsToPass)
8334     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8335 
8336   if (!IsTailCall) {
8337     // Add a register mask operand representing the call-preserved registers.
8338     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8339     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8340     assert(Mask && "Missing call preserved mask for calling convention");
8341     Ops.push_back(DAG.getRegisterMask(Mask));
8342   }
8343 
8344   // Glue the call to the argument copies, if any.
8345   if (Glue.getNode())
8346     Ops.push_back(Glue);
8347 
8348   // Emit the call.
8349   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8350 
8351   if (IsTailCall) {
8352     MF.getFrameInfo().setHasTailCall();
8353     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8354   }
8355 
8356   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8357   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8358   Glue = Chain.getValue(1);
8359 
8360   // Mark the end of the call, which is glued to the call itself.
8361   Chain = DAG.getCALLSEQ_END(Chain,
8362                              DAG.getConstant(NumBytes, DL, PtrVT, true),
8363                              DAG.getConstant(0, DL, PtrVT, true),
8364                              Glue, DL);
8365   Glue = Chain.getValue(1);
8366 
8367   // Assign locations to each value returned by this call.
8368   SmallVector<CCValAssign, 16> RVLocs;
8369   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8370   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8371 
8372   // Copy all of the result registers out of their specified physreg.
8373   for (auto &VA : RVLocs) {
8374     // Copy the value out
8375     SDValue RetValue =
8376         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
8377     // Glue the RetValue to the end of the call sequence
8378     Chain = RetValue.getValue(1);
8379     Glue = RetValue.getValue(2);
8380 
8381     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8382       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8383       SDValue RetValue2 =
8384           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8385       Chain = RetValue2.getValue(1);
8386       Glue = RetValue2.getValue(2);
8387       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8388                              RetValue2);
8389     }
8390 
8391     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8392 
8393     InVals.push_back(RetValue);
8394   }
8395 
8396   return Chain;
8397 }
8398 
8399 bool RISCVTargetLowering::CanLowerReturn(
8400     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8401     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8402   SmallVector<CCValAssign, 16> RVLocs;
8403   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8404 
8405   Optional<unsigned> FirstMaskArgument;
8406   if (Subtarget.hasStdExtV())
8407     FirstMaskArgument = preAssignMask(Outs);
8408 
8409   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8410     MVT VT = Outs[i].VT;
8411     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8412     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8413     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8414                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8415                  *this, FirstMaskArgument))
8416       return false;
8417   }
8418   return true;
8419 }
8420 
8421 SDValue
8422 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8423                                  bool IsVarArg,
8424                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
8425                                  const SmallVectorImpl<SDValue> &OutVals,
8426                                  const SDLoc &DL, SelectionDAG &DAG) const {
8427   const MachineFunction &MF = DAG.getMachineFunction();
8428   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8429 
8430   // Stores the assignment of the return value to a location.
8431   SmallVector<CCValAssign, 16> RVLocs;
8432 
8433   // Info about the registers and stack slot.
8434   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
8435                  *DAG.getContext());
8436 
8437   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
8438                     nullptr, CC_RISCV);
8439 
8440   if (CallConv == CallingConv::GHC && !RVLocs.empty())
8441     report_fatal_error("GHC functions return void only");
8442 
8443   SDValue Glue;
8444   SmallVector<SDValue, 4> RetOps(1, Chain);
8445 
8446   // Copy the result values into the output registers.
8447   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
8448     SDValue Val = OutVals[i];
8449     CCValAssign &VA = RVLocs[i];
8450     assert(VA.isRegLoc() && "Can only return in registers!");
8451 
8452     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8453       // Handle returning f64 on RV32D with a soft float ABI.
8454       assert(VA.isRegLoc() && "Expected return via registers");
8455       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
8456                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
8457       SDValue Lo = SplitF64.getValue(0);
8458       SDValue Hi = SplitF64.getValue(1);
8459       Register RegLo = VA.getLocReg();
8460       assert(RegLo < RISCV::X31 && "Invalid register pair");
8461       Register RegHi = RegLo + 1;
8462 
8463       if (STI.isRegisterReservedByUser(RegLo) ||
8464           STI.isRegisterReservedByUser(RegHi))
8465         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8466             MF.getFunction(),
8467             "Return value register required, but has been reserved."});
8468 
8469       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8470       Glue = Chain.getValue(1);
8471       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8472       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8473       Glue = Chain.getValue(1);
8474       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8475     } else {
8476       // Handle a 'normal' return.
8477       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8478       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8479 
8480       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8481         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8482             MF.getFunction(),
8483             "Return value register required, but has been reserved."});
8484 
8485       // Guarantee that all emitted copies are stuck together.
8486       Glue = Chain.getValue(1);
8487       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8488     }
8489   }
8490 
8491   RetOps[0] = Chain; // Update chain.
8492 
8493   // Add the glue node if we have it.
8494   if (Glue.getNode()) {
8495     RetOps.push_back(Glue);
8496   }
8497 
8498   unsigned RetOpc = RISCVISD::RET_FLAG;
8499   // Interrupt service routines use different return instructions.
8500   const Function &Func = DAG.getMachineFunction().getFunction();
8501   if (Func.hasFnAttribute("interrupt")) {
8502     if (!Func.getReturnType()->isVoidTy())
8503       report_fatal_error(
8504           "Functions with the interrupt attribute must have void return type!");
8505 
8506     MachineFunction &MF = DAG.getMachineFunction();
8507     StringRef Kind =
8508       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8509 
8510     if (Kind == "user")
8511       RetOpc = RISCVISD::URET_FLAG;
8512     else if (Kind == "supervisor")
8513       RetOpc = RISCVISD::SRET_FLAG;
8514     else
8515       RetOpc = RISCVISD::MRET_FLAG;
8516   }
8517 
8518   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8519 }
8520 
8521 void RISCVTargetLowering::validateCCReservedRegs(
8522     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8523     MachineFunction &MF) const {
8524   const Function &F = MF.getFunction();
8525   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8526 
8527   if (llvm::any_of(Regs, [&STI](auto Reg) {
8528         return STI.isRegisterReservedByUser(Reg.first);
8529       }))
8530     F.getContext().diagnose(DiagnosticInfoUnsupported{
8531         F, "Argument register required, but has been reserved."});
8532 }
8533 
8534 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8535   return CI->isTailCall();
8536 }
8537 
8538 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8539 #define NODE_NAME_CASE(NODE)                                                   \
8540   case RISCVISD::NODE:                                                         \
8541     return "RISCVISD::" #NODE;
8542   // clang-format off
8543   switch ((RISCVISD::NodeType)Opcode) {
8544   case RISCVISD::FIRST_NUMBER:
8545     break;
8546   NODE_NAME_CASE(RET_FLAG)
8547   NODE_NAME_CASE(URET_FLAG)
8548   NODE_NAME_CASE(SRET_FLAG)
8549   NODE_NAME_CASE(MRET_FLAG)
8550   NODE_NAME_CASE(CALL)
8551   NODE_NAME_CASE(SELECT_CC)
8552   NODE_NAME_CASE(BR_CC)
8553   NODE_NAME_CASE(BuildPairF64)
8554   NODE_NAME_CASE(SplitF64)
8555   NODE_NAME_CASE(TAIL)
8556   NODE_NAME_CASE(MULHSU)
8557   NODE_NAME_CASE(SLLW)
8558   NODE_NAME_CASE(SRAW)
8559   NODE_NAME_CASE(SRLW)
8560   NODE_NAME_CASE(DIVW)
8561   NODE_NAME_CASE(DIVUW)
8562   NODE_NAME_CASE(REMUW)
8563   NODE_NAME_CASE(ROLW)
8564   NODE_NAME_CASE(RORW)
8565   NODE_NAME_CASE(CLZW)
8566   NODE_NAME_CASE(CTZW)
8567   NODE_NAME_CASE(FSLW)
8568   NODE_NAME_CASE(FSRW)
8569   NODE_NAME_CASE(FSL)
8570   NODE_NAME_CASE(FSR)
8571   NODE_NAME_CASE(FMV_H_X)
8572   NODE_NAME_CASE(FMV_X_ANYEXTH)
8573   NODE_NAME_CASE(FMV_W_X_RV64)
8574   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8575   NODE_NAME_CASE(FCVT_X_RTZ)
8576   NODE_NAME_CASE(FCVT_XU_RTZ)
8577   NODE_NAME_CASE(FCVT_W_RTZ_RV64)
8578   NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
8579   NODE_NAME_CASE(READ_CYCLE_WIDE)
8580   NODE_NAME_CASE(GREV)
8581   NODE_NAME_CASE(GREVW)
8582   NODE_NAME_CASE(GORC)
8583   NODE_NAME_CASE(GORCW)
8584   NODE_NAME_CASE(SHFL)
8585   NODE_NAME_CASE(SHFLW)
8586   NODE_NAME_CASE(UNSHFL)
8587   NODE_NAME_CASE(UNSHFLW)
8588   NODE_NAME_CASE(BCOMPRESS)
8589   NODE_NAME_CASE(BCOMPRESSW)
8590   NODE_NAME_CASE(BDECOMPRESS)
8591   NODE_NAME_CASE(BDECOMPRESSW)
8592   NODE_NAME_CASE(VMV_V_X_VL)
8593   NODE_NAME_CASE(VFMV_V_F_VL)
8594   NODE_NAME_CASE(VMV_X_S)
8595   NODE_NAME_CASE(VMV_S_X_VL)
8596   NODE_NAME_CASE(VFMV_S_F_VL)
8597   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8598   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8599   NODE_NAME_CASE(READ_VLENB)
8600   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8601   NODE_NAME_CASE(VSLIDEUP_VL)
8602   NODE_NAME_CASE(VSLIDE1UP_VL)
8603   NODE_NAME_CASE(VSLIDEDOWN_VL)
8604   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8605   NODE_NAME_CASE(VID_VL)
8606   NODE_NAME_CASE(VFNCVT_ROD_VL)
8607   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8608   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8609   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8610   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8611   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8612   NODE_NAME_CASE(VECREDUCE_AND_VL)
8613   NODE_NAME_CASE(VECREDUCE_OR_VL)
8614   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8615   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8616   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8617   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8618   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8619   NODE_NAME_CASE(ADD_VL)
8620   NODE_NAME_CASE(AND_VL)
8621   NODE_NAME_CASE(MUL_VL)
8622   NODE_NAME_CASE(OR_VL)
8623   NODE_NAME_CASE(SDIV_VL)
8624   NODE_NAME_CASE(SHL_VL)
8625   NODE_NAME_CASE(SREM_VL)
8626   NODE_NAME_CASE(SRA_VL)
8627   NODE_NAME_CASE(SRL_VL)
8628   NODE_NAME_CASE(SUB_VL)
8629   NODE_NAME_CASE(UDIV_VL)
8630   NODE_NAME_CASE(UREM_VL)
8631   NODE_NAME_CASE(XOR_VL)
8632   NODE_NAME_CASE(SADDSAT_VL)
8633   NODE_NAME_CASE(UADDSAT_VL)
8634   NODE_NAME_CASE(SSUBSAT_VL)
8635   NODE_NAME_CASE(USUBSAT_VL)
8636   NODE_NAME_CASE(FADD_VL)
8637   NODE_NAME_CASE(FSUB_VL)
8638   NODE_NAME_CASE(FMUL_VL)
8639   NODE_NAME_CASE(FDIV_VL)
8640   NODE_NAME_CASE(FNEG_VL)
8641   NODE_NAME_CASE(FABS_VL)
8642   NODE_NAME_CASE(FSQRT_VL)
8643   NODE_NAME_CASE(FMA_VL)
8644   NODE_NAME_CASE(FCOPYSIGN_VL)
8645   NODE_NAME_CASE(SMIN_VL)
8646   NODE_NAME_CASE(SMAX_VL)
8647   NODE_NAME_CASE(UMIN_VL)
8648   NODE_NAME_CASE(UMAX_VL)
8649   NODE_NAME_CASE(FMINNUM_VL)
8650   NODE_NAME_CASE(FMAXNUM_VL)
8651   NODE_NAME_CASE(MULHS_VL)
8652   NODE_NAME_CASE(MULHU_VL)
8653   NODE_NAME_CASE(FP_TO_SINT_VL)
8654   NODE_NAME_CASE(FP_TO_UINT_VL)
8655   NODE_NAME_CASE(SINT_TO_FP_VL)
8656   NODE_NAME_CASE(UINT_TO_FP_VL)
8657   NODE_NAME_CASE(FP_EXTEND_VL)
8658   NODE_NAME_CASE(FP_ROUND_VL)
8659   NODE_NAME_CASE(VWMUL_VL)
8660   NODE_NAME_CASE(VWMULU_VL)
8661   NODE_NAME_CASE(SETCC_VL)
8662   NODE_NAME_CASE(VSELECT_VL)
8663   NODE_NAME_CASE(VMAND_VL)
8664   NODE_NAME_CASE(VMOR_VL)
8665   NODE_NAME_CASE(VMXOR_VL)
8666   NODE_NAME_CASE(VMCLR_VL)
8667   NODE_NAME_CASE(VMSET_VL)
8668   NODE_NAME_CASE(VRGATHER_VX_VL)
8669   NODE_NAME_CASE(VRGATHER_VV_VL)
8670   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8671   NODE_NAME_CASE(VSEXT_VL)
8672   NODE_NAME_CASE(VZEXT_VL)
8673   NODE_NAME_CASE(VPOPC_VL)
8674   NODE_NAME_CASE(VLE_VL)
8675   NODE_NAME_CASE(VSE_VL)
8676   NODE_NAME_CASE(READ_CSR)
8677   NODE_NAME_CASE(WRITE_CSR)
8678   NODE_NAME_CASE(SWAP_CSR)
8679   }
8680   // clang-format on
8681   return nullptr;
8682 #undef NODE_NAME_CASE
8683 }
8684 
8685 /// getConstraintType - Given a constraint letter, return the type of
8686 /// constraint it is for this target.
8687 RISCVTargetLowering::ConstraintType
8688 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8689   if (Constraint.size() == 1) {
8690     switch (Constraint[0]) {
8691     default:
8692       break;
8693     case 'f':
8694       return C_RegisterClass;
8695     case 'I':
8696     case 'J':
8697     case 'K':
8698       return C_Immediate;
8699     case 'A':
8700       return C_Memory;
8701     case 'S': // A symbolic address
8702       return C_Other;
8703     }
8704   } else {
8705     if (Constraint == "vr" || Constraint == "vm")
8706       return C_RegisterClass;
8707   }
8708   return TargetLowering::getConstraintType(Constraint);
8709 }
8710 
8711 std::pair<unsigned, const TargetRegisterClass *>
8712 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8713                                                   StringRef Constraint,
8714                                                   MVT VT) const {
8715   // First, see if this is a constraint that directly corresponds to a
8716   // RISCV register class.
8717   if (Constraint.size() == 1) {
8718     switch (Constraint[0]) {
8719     case 'r':
8720       return std::make_pair(0U, &RISCV::GPRRegClass);
8721     case 'f':
8722       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8723         return std::make_pair(0U, &RISCV::FPR16RegClass);
8724       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8725         return std::make_pair(0U, &RISCV::FPR32RegClass);
8726       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8727         return std::make_pair(0U, &RISCV::FPR64RegClass);
8728       break;
8729     default:
8730       break;
8731     }
8732   } else {
8733     if (Constraint == "vr") {
8734       for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
8735                              &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8736         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8737           return std::make_pair(0U, RC);
8738       }
8739     } else if (Constraint == "vm") {
8740       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8741         return std::make_pair(0U, &RISCV::VMRegClass);
8742     }
8743   }
8744 
8745   // Clang will correctly decode the usage of register name aliases into their
8746   // official names. However, other frontends like `rustc` do not. This allows
8747   // users of these frontends to use the ABI names for registers in LLVM-style
8748   // register constraints.
8749   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8750                                .Case("{zero}", RISCV::X0)
8751                                .Case("{ra}", RISCV::X1)
8752                                .Case("{sp}", RISCV::X2)
8753                                .Case("{gp}", RISCV::X3)
8754                                .Case("{tp}", RISCV::X4)
8755                                .Case("{t0}", RISCV::X5)
8756                                .Case("{t1}", RISCV::X6)
8757                                .Case("{t2}", RISCV::X7)
8758                                .Cases("{s0}", "{fp}", RISCV::X8)
8759                                .Case("{s1}", RISCV::X9)
8760                                .Case("{a0}", RISCV::X10)
8761                                .Case("{a1}", RISCV::X11)
8762                                .Case("{a2}", RISCV::X12)
8763                                .Case("{a3}", RISCV::X13)
8764                                .Case("{a4}", RISCV::X14)
8765                                .Case("{a5}", RISCV::X15)
8766                                .Case("{a6}", RISCV::X16)
8767                                .Case("{a7}", RISCV::X17)
8768                                .Case("{s2}", RISCV::X18)
8769                                .Case("{s3}", RISCV::X19)
8770                                .Case("{s4}", RISCV::X20)
8771                                .Case("{s5}", RISCV::X21)
8772                                .Case("{s6}", RISCV::X22)
8773                                .Case("{s7}", RISCV::X23)
8774                                .Case("{s8}", RISCV::X24)
8775                                .Case("{s9}", RISCV::X25)
8776                                .Case("{s10}", RISCV::X26)
8777                                .Case("{s11}", RISCV::X27)
8778                                .Case("{t3}", RISCV::X28)
8779                                .Case("{t4}", RISCV::X29)
8780                                .Case("{t5}", RISCV::X30)
8781                                .Case("{t6}", RISCV::X31)
8782                                .Default(RISCV::NoRegister);
8783   if (XRegFromAlias != RISCV::NoRegister)
8784     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8785 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating-point register type available, manually select FP registers here.
8790   //
8791   // The second case is the ABI name of the register, so that frontends can also
8792   // use the ABI names in register constraint lists.
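  // For example, both "{f10}" and "{fa0}" select F10_F below, which is widened
  // to F10_D when the D extension is available.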
8793   if (Subtarget.hasStdExtF()) {
8794     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8795                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8796                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8797                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8798                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8799                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8800                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8801                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8802                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8803                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8804                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8805                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8806                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8807                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8808                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8809                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8810                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8811                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8812                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8813                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8814                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8815                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8816                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8817                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8818                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8819                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8820                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8821                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8822                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8823                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8824                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8825                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8826                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8827                         .Default(RISCV::NoRegister);
8828     if (FReg != RISCV::NoRegister) {
8829       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8830       if (Subtarget.hasStdExtD()) {
8831         unsigned RegNo = FReg - RISCV::F0_F;
8832         unsigned DReg = RISCV::F0_D + RegNo;
8833         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8834       }
8835       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8836     }
8837   }
8838 
8839   if (Subtarget.hasStdExtV()) {
8840     Register VReg = StringSwitch<Register>(Constraint.lower())
8841                         .Case("{v0}", RISCV::V0)
8842                         .Case("{v1}", RISCV::V1)
8843                         .Case("{v2}", RISCV::V2)
8844                         .Case("{v3}", RISCV::V3)
8845                         .Case("{v4}", RISCV::V4)
8846                         .Case("{v5}", RISCV::V5)
8847                         .Case("{v6}", RISCV::V6)
8848                         .Case("{v7}", RISCV::V7)
8849                         .Case("{v8}", RISCV::V8)
8850                         .Case("{v9}", RISCV::V9)
8851                         .Case("{v10}", RISCV::V10)
8852                         .Case("{v11}", RISCV::V11)
8853                         .Case("{v12}", RISCV::V12)
8854                         .Case("{v13}", RISCV::V13)
8855                         .Case("{v14}", RISCV::V14)
8856                         .Case("{v15}", RISCV::V15)
8857                         .Case("{v16}", RISCV::V16)
8858                         .Case("{v17}", RISCV::V17)
8859                         .Case("{v18}", RISCV::V18)
8860                         .Case("{v19}", RISCV::V19)
8861                         .Case("{v20}", RISCV::V20)
8862                         .Case("{v21}", RISCV::V21)
8863                         .Case("{v22}", RISCV::V22)
8864                         .Case("{v23}", RISCV::V23)
8865                         .Case("{v24}", RISCV::V24)
8866                         .Case("{v25}", RISCV::V25)
8867                         .Case("{v26}", RISCV::V26)
8868                         .Case("{v27}", RISCV::V27)
8869                         .Case("{v28}", RISCV::V28)
8870                         .Case("{v29}", RISCV::V29)
8871                         .Case("{v30}", RISCV::V30)
8872                         .Case("{v31}", RISCV::V31)
8873                         .Default(RISCV::NoRegister);
8874     if (VReg != RISCV::NoRegister) {
8875       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8876         return std::make_pair(VReg, &RISCV::VMRegClass);
8877       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8878         return std::make_pair(VReg, &RISCV::VRRegClass);
8879       for (const auto *RC :
8880            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8881         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8882           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8883           return std::make_pair(VReg, RC);
8884         }
8885       }
8886     }
8887   }
8888 
8889   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8890 }
8891 
8892 unsigned
8893 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently, only length-1 constraints are supported.
8895   if (ConstraintCode.size() == 1) {
8896     switch (ConstraintCode[0]) {
8897     case 'A':
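      // 'A': an address operand held in a general-purpose register.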
8898       return InlineAsm::Constraint_A;
8899     default:
8900       break;
8901     }
8902   }
8903 
8904   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8905 }
8906 
8907 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8908     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8909     SelectionDAG &DAG) const {
  // Currently, only length-1 constraints are supported.
8911   if (Constraint.length() == 1) {
8912     switch (Constraint[0]) {
8913     case 'I':
8914       // Validate & create a 12-bit signed immediate operand.
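      // For illustration (a hypothetical use, not taken from a test):
      //   asm("addi %0, %1, %2" : "=r"(Out) : "r"(In), "I"(42));
      // reaches this case with a ConstantSDNode of value 42.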
8915       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8916         uint64_t CVal = C->getSExtValue();
8917         if (isInt<12>(CVal))
8918           Ops.push_back(
8919               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8920       }
8921       return;
8922     case 'J':
8923       // Validate & create an integer zero operand.
8924       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8925         if (C->getZExtValue() == 0)
8926           Ops.push_back(
8927               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8928       return;
8929     case 'K':
8930       // Validate & create a 5-bit unsigned immediate operand.
8931       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8932         uint64_t CVal = C->getZExtValue();
8933         if (isUInt<5>(CVal))
8934           Ops.push_back(
8935               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8936       }
8937       return;
8938     case 'S':
8939       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
8940         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
8941                                                  GA->getValueType(0)));
8942       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
8943         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
8944                                                 BA->getValueType(0)));
8945       }
8946       return;
8947     default:
8948       break;
8949     }
8950   }
8951   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8952 }
8953 
8954 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8955                                                    Instruction *Inst,
8956                                                    AtomicOrdering Ord) const {
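  // Together with emitTrailingFence below, this follows the fence-based
  // mapping for atomics: a seq_cst load lowers to "fence rw,rw; load;
  // fence r,rw" and a release (or stronger) store to "fence rw,w; store".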
8957   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8958     return Builder.CreateFence(Ord);
8959   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8960     return Builder.CreateFence(AtomicOrdering::Release);
8961   return nullptr;
8962 }
8963 
8964 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8965                                                     Instruction *Inst,
8966                                                     AtomicOrdering Ord) const {
8967   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8968     return Builder.CreateFence(AtomicOrdering::Acquire);
8969   return nullptr;
8970 }
8971 
8972 TargetLowering::AtomicExpansionKind
8973 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8974   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8975   // point operations can't be used in an lr/sc sequence without breaking the
8976   // forward-progress guarantee.
8977   if (AI->isFloatingPointOperation())
8978     return AtomicExpansionKind::CmpXChg;
8979 
8980   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
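  // Sub-word (8- and 16-bit) operations have no native AMO instruction, so
  // they are expanded to a masked LR/SC sequence on the containing aligned
  // word; see emitMaskedAtomicRMWIntrinsic below.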
8981   if (Size == 8 || Size == 16)
8982     return AtomicExpansionKind::MaskedIntrinsic;
8983   return AtomicExpansionKind::None;
8984 }
8985 
8986 static Intrinsic::ID
8987 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8988   if (XLen == 32) {
8989     switch (BinOp) {
8990     default:
8991       llvm_unreachable("Unexpected AtomicRMW BinOp");
8992     case AtomicRMWInst::Xchg:
8993       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8994     case AtomicRMWInst::Add:
8995       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8996     case AtomicRMWInst::Sub:
8997       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8998     case AtomicRMWInst::Nand:
8999       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
9000     case AtomicRMWInst::Max:
9001       return Intrinsic::riscv_masked_atomicrmw_max_i32;
9002     case AtomicRMWInst::Min:
9003       return Intrinsic::riscv_masked_atomicrmw_min_i32;
9004     case AtomicRMWInst::UMax:
9005       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
9006     case AtomicRMWInst::UMin:
9007       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
9008     }
9009   }
9010 
9011   if (XLen == 64) {
9012     switch (BinOp) {
9013     default:
9014       llvm_unreachable("Unexpected AtomicRMW BinOp");
9015     case AtomicRMWInst::Xchg:
9016       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
9017     case AtomicRMWInst::Add:
9018       return Intrinsic::riscv_masked_atomicrmw_add_i64;
9019     case AtomicRMWInst::Sub:
9020       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
9021     case AtomicRMWInst::Nand:
9022       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
9023     case AtomicRMWInst::Max:
9024       return Intrinsic::riscv_masked_atomicrmw_max_i64;
9025     case AtomicRMWInst::Min:
9026       return Intrinsic::riscv_masked_atomicrmw_min_i64;
9027     case AtomicRMWInst::UMax:
9028       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
9029     case AtomicRMWInst::UMin:
9030       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
9031     }
9032   }
9033 
9034   llvm_unreachable("Unexpected XLen\n");
9035 }
9036 
9037 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
9038     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
9039     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
9040   unsigned XLen = Subtarget.getXLen();
9041   Value *Ordering =
9042       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
9043   Type *Tys[] = {AlignedAddr->getType()};
9044   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
9045       AI->getModule(),
9046       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
9047 
9048   if (XLen == 64) {
9049     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
9050     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9051     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
9052   }
9053 
9054   Value *Result;
9055 
9056   // Must pass the shift amount needed to sign extend the loaded value prior
9057   // to performing a signed comparison for min/max. ShiftAmt is the number of
9058   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
9059   // is the number of bits to left+right shift the value in order to
9060   // sign-extend.
9061   if (AI->getOperation() == AtomicRMWInst::Min ||
9062       AI->getOperation() == AtomicRMWInst::Max) {
9063     const DataLayout &DL = AI->getModule()->getDataLayout();
9064     unsigned ValWidth =
9065         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
9066     Value *SextShamt =
9067         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
9068     Result = Builder.CreateCall(LrwOpScwLoop,
9069                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
9070   } else {
9071     Result =
9072         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
9073   }
9074 
9075   if (XLen == 64)
9076     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9077   return Result;
9078 }
9079 
9080 TargetLowering::AtomicExpansionKind
9081 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
9082     AtomicCmpXchgInst *CI) const {
9083   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
9084   if (Size == 8 || Size == 16)
9085     return AtomicExpansionKind::MaskedIntrinsic;
9086   return AtomicExpansionKind::None;
9087 }
9088 
9089 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
9090     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
9091     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
9092   unsigned XLen = Subtarget.getXLen();
9093   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
9094   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
9095   if (XLen == 64) {
9096     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
9097     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
9098     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9099     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
9100   }
9101   Type *Tys[] = {AlignedAddr->getType()};
9102   Function *MaskedCmpXchg =
9103       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
9104   Value *Result = Builder.CreateCall(
9105       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
9106   if (XLen == 64)
9107     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
9108   return Result;
9109 }
9110 
9111 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
9112   return false;
9113 }
9114 
9115 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
9116                                                      EVT VT) const {
9117   VT = VT.getScalarType();
9118 
9119   if (!VT.isSimple())
9120     return false;
9121 
9122   switch (VT.getSimpleVT().SimpleTy) {
9123   case MVT::f16:
9124     return Subtarget.hasStdExtZfh();
9125   case MVT::f32:
9126     return Subtarget.hasStdExtF();
9127   case MVT::f64:
9128     return Subtarget.hasStdExtD();
9129   default:
9130     break;
9131   }
9132 
9133   return false;
9134 }
9135 
9136 Register RISCVTargetLowering::getExceptionPointerRegister(
9137     const Constant *PersonalityFn) const {
9138   return RISCV::X10;
9139 }
9140 
9141 Register RISCVTargetLowering::getExceptionSelectorRegister(
9142     const Constant *PersonalityFn) const {
9143   return RISCV::X11;
9144 }
9145 
9146 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value has f32 type under the LP64 ABI.
9149   RISCVABI::ABI ABI = Subtarget.getTargetABI();
9150   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
9151     return false;
9152 
9153   return true;
9154 }
9155 
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
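  // The RV64 calling convention passes i32 values sign-extended to i64, so
  // sign-extend i32 libcall arguments regardless of nominal signedness.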
9157   if (Subtarget.is64Bit() && Type == MVT::i32)
9158     return true;
9159 
9160   return IsSigned;
9161 }
9162 
9163 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
9164                                                  SDValue C) const {
9165   // Check integral scalar types.
9166   if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
9169     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
9170       return false;
9171     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
9172       // Break the MUL to a SLLI and an ADD/SUB.
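      // e.g. x * 9 -> (x << 3) + x and x * 7 -> (x << 3) - x.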
9173       const APInt &Imm = ConstNode->getAPIntValue();
9174       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
9175           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
9176         return true;
9177       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
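      // e.g. x * 4100 -> SH2ADD x, (SLLI x, 12): 4100 == (1 << 12) + 4.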
9178       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
9179           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
9180            (Imm - 8).isPowerOf2()))
9181         return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
9184       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
9185         return false;
9186       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
9187       // a pair of LUI/ADDI.
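      // e.g. x * 18432 -> ((x << 3) + x) << 11, since 18432 == 9 << 11.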
9188       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
9189         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
9190         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
9191             (1 - ImmS).isPowerOf2())
          return true;
9193       }
9194     }
9195   }
9196 
9197   return false;
9198 }
9199 
9200 bool RISCVTargetLowering::isMulAddWithConstProfitable(
9201     const SDValue &AddNode, const SDValue &ConstNode) const {
9202   // Let the DAGCombiner decide for vectors.
9203   EVT VT = AddNode.getValueType();
9204   if (VT.isVector())
9205     return true;
9206 
9207   // Let the DAGCombiner decide for larger types.
9208   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
9209     return true;
9210 
9211   // It is worse if c1 is simm12 while c1*c2 is not.
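  // e.g. for (x + 2000) * 100, 2000 fits in simm12 but 200000 does not, so
  // folding the add into the multiply would force an extra LUI/ADDI pair to
  // materialize the constant.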
9212   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
9213   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
9214   const APInt &C1 = C1Node->getAPIntValue();
9215   const APInt &C2 = C2Node->getAPIntValue();
9216   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
9217     return false;
9218 
9219   // Default to true and let the DAGCombiner decide.
9220   return true;
9221 }
9222 
9223 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
9224     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
9225     bool *Fast) const {
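  // Misaligned scalar accesses are not declared legal here; vector accesses
  // are treated as fast as long as they are at least element-aligned.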
9226   if (!VT.isVector())
9227     return false;
9228 
9229   EVT ElemVT = VT.getVectorElementType();
9230   if (Alignment >= ElemVT.getStoreSize()) {
9231     if (Fast)
9232       *Fast = true;
9233     return true;
9234   }
9235 
9236   return false;
9237 }
9238 
9239 bool RISCVTargetLowering::splitValueIntoRegisterParts(
9240     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
9241     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
9242   bool IsABIRegCopy = CC.hasValue();
9243   EVT ValueVT = Val.getValueType();
9244   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Bitcast the f16 to i16, any-extend to i32, pad the upper bits with
    // ones to form a NaN-boxed f32 value, and bitcast to f32.
9247     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
9248     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
9249     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
9250                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
9251     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
9252     Parts[0] = Val;
9253     return true;
9254   }
9255 
9256   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
9257     LLVMContext &Context = *DAG.getContext();
9258     EVT ValueEltVT = ValueVT.getVectorElementType();
9259     EVT PartEltVT = PartVT.getVectorElementType();
9260     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
9261     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
9262     if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types differ, first bitcast to a vector with PartVT's
      // element type.
9265       if (ValueEltVT != PartEltVT) {
9266         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
9268         EVT SameEltTypeVT =
9269             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
9270         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
9271       }
9272       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
9273                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
9274       Parts[0] = Val;
9275       return true;
9276     }
9277   }
9278   return false;
9279 }
9280 
9281 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
9282     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
9283     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
9284   bool IsABIRegCopy = CC.hasValue();
9285   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
9286     SDValue Val = Parts[0];
9287 
9288     // Cast the f32 to i32, truncate to i16, and cast back to f16.
9289     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
9290     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
9291     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
9292     return Val;
9293   }
9294 
9295   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
9296     LLVMContext &Context = *DAG.getContext();
9297     SDValue Val = Parts[0];
9298     EVT ValueEltVT = ValueVT.getVectorElementType();
9299     EVT PartEltVT = PartVT.getVectorElementType();
9300     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
9301     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
9302     if (PartVTBitSize % ValueVTBitSize == 0) {
9303       EVT SameEltTypeVT = ValueVT;
      // If the element types differ, extract using an intermediate type with
      // PartVT's element type, then bitcast back to ValueVT below.
9306       if (ValueEltVT != PartEltVT) {
9307         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
9309         SameEltTypeVT =
9310             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
9311       }
9312       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
9313                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
9314       if (ValueEltVT != PartEltVT)
9315         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
9316       return Val;
9317     }
9318   }
9319   return SDValue();
9320 }
9321 
9322 #define GET_REGISTER_MATCHER
9323 #include "RISCVGenAsmMatcher.inc"
9324 
9325 Register
9326 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
9327                                        const MachineFunction &MF) const {
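  // Used by llvm.read_register/llvm.write_register and named-register global
  // variables. Both architectural ("x8") and ABI ("s0") names are accepted,
  // but only registers that are reserved may be obtained.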
9328   Register Reg = MatchRegisterAltName(RegName);
9329   if (Reg == RISCV::NoRegister)
9330     Reg = MatchRegisterName(RegName);
9331   if (Reg == RISCV::NoRegister)
9332     report_fatal_error(
9333         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
9334   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
9335   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
9336     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
9337                              StringRef(RegName) + "\"."));
9338   return Reg;
9339 }
9340 
9341 namespace llvm {
9342 namespace RISCVVIntrinsicsTable {
9343 
9344 #define GET_RISCVVIntrinsicsTable_IMPL
9345 #include "RISCVGenSearchableTables.inc"
9346 
9347 } // namespace RISCVVIntrinsicsTable
9348 
9349 } // namespace llvm
9350