1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58               "doesn't support the F instruction set extension (ignoring "
59               "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
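    // Pick the vector register class that matches the type's LMUL: VR for
    // LMUL <= 1, and VRM2/VRM4/VRM8 for the grouped register classes.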
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
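  // X2 is the stack pointer register (sp) in the RISC-V calling convention.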
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
254     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
255     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
256     // BSWAP i8 doesn't exist.
257     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
258     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
259 
260     if (Subtarget.is64Bit()) {
261       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
262       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
263     }
264   } else {
265     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
266     // pattern match it directly in isel.
267     setOperationAction(ISD::BSWAP, XLenVT,
268                        Subtarget.hasStdExtZbb() ? Legal : Expand);
269   }
270 
271   if (Subtarget.hasStdExtZbb()) {
272     setOperationAction(ISD::SMIN, XLenVT, Legal);
273     setOperationAction(ISD::SMAX, XLenVT, Legal);
274     setOperationAction(ISD::UMIN, XLenVT, Legal);
275     setOperationAction(ISD::UMAX, XLenVT, Legal);
276 
277     if (Subtarget.is64Bit()) {
278       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
279       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
280       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
281       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
282     }
283   } else {
284     setOperationAction(ISD::CTTZ, XLenVT, Expand);
285     setOperationAction(ISD::CTLZ, XLenVT, Expand);
286     setOperationAction(ISD::CTPOP, XLenVT, Expand);
287   }
288 
289   if (Subtarget.hasStdExtZbt()) {
290     setOperationAction(ISD::FSHL, XLenVT, Custom);
291     setOperationAction(ISD::FSHR, XLenVT, Custom);
292     setOperationAction(ISD::SELECT, XLenVT, Legal);
293 
294     if (Subtarget.is64Bit()) {
295       setOperationAction(ISD::FSHL, MVT::i32, Custom);
296       setOperationAction(ISD::FSHR, MVT::i32, Custom);
297     }
298   } else {
299     setOperationAction(ISD::SELECT, XLenVT, Custom);
300   }
301 
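  // FP condition codes that are expanded into combinations of the natively
  // handled ordered compares (feq/flt/fle), with operand swaps and inversions.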
302   ISD::CondCode FPCCToExpand[] = {
303       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
304       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
305       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
306 
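  // FP operations with no hardware instruction in F/D/Zfh; these are expanded,
  // mostly to library calls.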
307   ISD::NodeType FPOpToExpand[] = {
308       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
309       ISD::FP_TO_FP16};
310 
311   if (Subtarget.hasStdExtZfh())
312     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
313 
314   if (Subtarget.hasStdExtZfh()) {
315     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
316     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
317     setOperationAction(ISD::LRINT, MVT::f16, Legal);
318     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
319     setOperationAction(ISD::LROUND, MVT::f16, Legal);
320     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
321     for (auto CC : FPCCToExpand)
322       setCondCodeAction(CC, MVT::f16, Expand);
323     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
324     setOperationAction(ISD::SELECT, MVT::f16, Custom);
325     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
326     for (auto Op : FPOpToExpand)
327       setOperationAction(Op, MVT::f16, Expand);
328   }
329 
330   if (Subtarget.hasStdExtF()) {
331     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
332     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
333     setOperationAction(ISD::LRINT, MVT::f32, Legal);
334     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
335     setOperationAction(ISD::LROUND, MVT::f32, Legal);
336     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
337     for (auto CC : FPCCToExpand)
338       setCondCodeAction(CC, MVT::f32, Expand);
339     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
340     setOperationAction(ISD::SELECT, MVT::f32, Custom);
341     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
342     for (auto Op : FPOpToExpand)
343       setOperationAction(Op, MVT::f32, Expand);
344     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
345     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
346   }
347 
348   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
349     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
350 
351   if (Subtarget.hasStdExtD()) {
352     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
353     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
354     setOperationAction(ISD::LRINT, MVT::f64, Legal);
355     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
356     setOperationAction(ISD::LROUND, MVT::f64, Legal);
357     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
358     for (auto CC : FPCCToExpand)
359       setCondCodeAction(CC, MVT::f64, Expand);
360     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
361     setOperationAction(ISD::SELECT, MVT::f64, Custom);
362     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
363     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
364     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
365     for (auto Op : FPOpToExpand)
366       setOperationAction(Op, MVT::f64, Expand);
367     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
368     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
369   }
370 
371   if (Subtarget.is64Bit()) {
372     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
373     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
374     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
375     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
376   }
377 
378   if (Subtarget.hasStdExtF()) {
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
384   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
385   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
386   setOperationAction(ISD::JumpTable, XLenVT, Custom);
387 
388   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
389 
390   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
391   // Unfortunately this can't be determined just from the ISA naming string.
392   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
393                      Subtarget.is64Bit() ? Legal : Custom);
394 
395   setOperationAction(ISD::TRAP, MVT::Other, Legal);
396   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
397   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
398   if (Subtarget.is64Bit())
399     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
400 
401   if (Subtarget.hasStdExtA()) {
402     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
403     setMinCmpXchgSizeInBits(32);
404   } else {
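    // With no A extension, AtomicExpand will lower every atomic operation to a
    // __atomic_* library call.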
405     setMaxAtomicSizeInBitsSupported(0);
406   }
407 
408   setBooleanContents(ZeroOrOneBooleanContent);
409 
410   if (Subtarget.hasStdExtV()) {
411     setBooleanVectorContents(ZeroOrOneBooleanContent);
412 
413     setOperationAction(ISD::VSCALE, XLenVT, Custom);
414 
415     // RVV intrinsics may have illegal operands.
416     // We also need to custom legalize vmv.x.s.
417     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
418     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
419     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
420     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
421     if (Subtarget.is64Bit()) {
422       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
423     } else {
424       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
425       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
426     }
427 
428     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
429 
430     static unsigned IntegerVPOps[] = {
431         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
432         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
433         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
434 
435     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
436                                             ISD::VP_FMUL, ISD::VP_FDIV};
437 
438     if (!Subtarget.is64Bit()) {
439       // We must custom-lower certain vXi64 operations on RV32 due to the vector
440       // element type being illegal.
441       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
442       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
443 
444       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
445       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
446       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
447       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
448       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
449       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
450       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
451       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
452     }
453 
454     for (MVT VT : BoolVecVTs) {
455       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
456 
457       // Mask VTs are custom-expanded into a series of standard nodes
458       setOperationAction(ISD::TRUNCATE, VT, Custom);
459       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
460       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
461       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
462 
463       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
464       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
465 
466       setOperationAction(ISD::SELECT, VT, Custom);
467       setOperationAction(ISD::SELECT_CC, VT, Expand);
468       setOperationAction(ISD::VSELECT, VT, Expand);
469 
470       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
471       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
472       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
473 
474       // RVV has native int->float & float->int conversions where the
475       // element type sizes are within one power-of-two of each other. Any
476       // wider distances between type sizes have to be lowered as sequences
477       // which progressively narrow the gap in stages.
478       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
479       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
480       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
481       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
482 
483       // Expand all extending loads to types larger than this, and truncating
484       // stores from types larger than this.
485       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
486         setTruncStoreAction(OtherVT, VT, Expand);
487         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
488         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
489         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
490       }
491     }
492 
493     for (MVT VT : IntVecVTs) {
494       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
495       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
496 
497       setOperationAction(ISD::SMIN, VT, Legal);
498       setOperationAction(ISD::SMAX, VT, Legal);
499       setOperationAction(ISD::UMIN, VT, Legal);
500       setOperationAction(ISD::UMAX, VT, Legal);
501 
502       setOperationAction(ISD::ROTL, VT, Expand);
503       setOperationAction(ISD::ROTR, VT, Expand);
504 
505       // Custom-lower extensions and truncations from/to mask types.
506       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
507       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
508       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
509 
510       // RVV has native int->float & float->int conversions where the
511       // element type sizes are within one power-of-two of each other. Any
512       // wider distances between type sizes have to be lowered as sequences
513       // which progressively narrow the gap in stages.
514       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
515       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
516       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
517       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
518 
519       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
520       // nodes which truncate by one power of two at a time.
521       setOperationAction(ISD::TRUNCATE, VT, Custom);
522 
523       // Custom-lower insert/extract operations to simplify patterns.
524       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
525       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
526 
527       // Custom-lower reduction operations to set up the corresponding custom
528       // nodes' operands.
529       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
530       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
531       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
532       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
533       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
534       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
535       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
536       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
537 
538       for (unsigned VPOpc : IntegerVPOps)
539         setOperationAction(VPOpc, VT, Custom);
540 
541       setOperationAction(ISD::LOAD, VT, Custom);
542       setOperationAction(ISD::STORE, VT, Custom);
543 
544       setOperationAction(ISD::MLOAD, VT, Custom);
545       setOperationAction(ISD::MSTORE, VT, Custom);
546       setOperationAction(ISD::MGATHER, VT, Custom);
547       setOperationAction(ISD::MSCATTER, VT, Custom);
548 
549       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
550       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
551       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
552 
553       setOperationAction(ISD::SELECT, VT, Custom);
554       setOperationAction(ISD::SELECT_CC, VT, Expand);
555 
556       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
557       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
558 
559       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
560         setTruncStoreAction(VT, OtherVT, Expand);
561         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
562         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
563         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
564       }
565     }
566 
567     // Expand various CCs to best match the RVV ISA, which natively supports UNE
568     // but no other unordered comparisons, and supports all ordered comparisons
569     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
570     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
571     // and we pattern-match those back to the "original", swapping operands once
572     // more. This way we catch both operations and both "vf" and "fv" forms with
573     // fewer patterns.
574     ISD::CondCode VFPCCToExpand[] = {
575         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
576         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
577         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
578     };
579 
580     // Sets common operation actions on RVV floating-point vector types.
581     const auto SetCommonVFPActions = [&](MVT VT) {
582       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
583       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
584       // sizes are within one power-of-two of each other. Therefore conversions
585       // between vXf16 and vXf64 must be lowered as sequences which convert via
586       // vXf32.
587       setOperationAction(ISD::FP_ROUND, VT, Custom);
588       setOperationAction(ISD::FP_EXTEND, VT, Custom);
589       // Custom-lower insert/extract operations to simplify patterns.
590       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
591       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
592       // Expand various condition codes (explained above).
593       for (auto CC : VFPCCToExpand)
594         setCondCodeAction(CC, VT, Expand);
595 
596       setOperationAction(ISD::FMINNUM, VT, Legal);
597       setOperationAction(ISD::FMAXNUM, VT, Legal);
598 
599       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
600       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
601       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
602       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
603       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
604 
605       setOperationAction(ISD::LOAD, VT, Custom);
606       setOperationAction(ISD::STORE, VT, Custom);
607 
608       setOperationAction(ISD::MLOAD, VT, Custom);
609       setOperationAction(ISD::MSTORE, VT, Custom);
610       setOperationAction(ISD::MGATHER, VT, Custom);
611       setOperationAction(ISD::MSCATTER, VT, Custom);
612 
613       setOperationAction(ISD::SELECT, VT, Custom);
614       setOperationAction(ISD::SELECT_CC, VT, Expand);
615 
616       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
617       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
618       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
619 
620       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
621 
622       for (unsigned VPOpc : FloatingPointVPOps)
623         setOperationAction(VPOpc, VT, Custom);
624     };
625 
626     // Sets common extload/truncstore actions on RVV floating-point vector
627     // types.
628     const auto SetCommonVFPExtLoadTruncStoreActions =
629         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
630           for (auto SmallVT : SmallerVTs) {
631             setTruncStoreAction(VT, SmallVT, Expand);
632             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
633           }
634         };
635 
636     if (Subtarget.hasStdExtZfh())
637       for (MVT VT : F16VecVTs)
638         SetCommonVFPActions(VT);
639 
640     for (MVT VT : F32VecVTs) {
641       if (Subtarget.hasStdExtF())
642         SetCommonVFPActions(VT);
643       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
644     }
645 
646     for (MVT VT : F64VecVTs) {
647       if (Subtarget.hasStdExtD())
648         SetCommonVFPActions(VT);
649       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
650       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
651     }
652 
653     if (Subtarget.useRVVForFixedLengthVectors()) {
654       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
655         if (!useRVVForFixedLengthVectorVT(VT))
656           continue;
657 
658         // By default everything must be expanded.
659         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
660           setOperationAction(Op, VT, Expand);
661         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
662           setTruncStoreAction(VT, OtherVT, Expand);
663           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
664           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
665           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
666         }
667 
668         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
669         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
670         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
671 
672         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
673         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
674 
675         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
676         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
677 
678         setOperationAction(ISD::LOAD, VT, Custom);
679         setOperationAction(ISD::STORE, VT, Custom);
680 
681         setOperationAction(ISD::SETCC, VT, Custom);
682 
683         setOperationAction(ISD::SELECT, VT, Custom);
684 
685         setOperationAction(ISD::TRUNCATE, VT, Custom);
686 
687         setOperationAction(ISD::BITCAST, VT, Custom);
688 
689         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
690         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
691         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
692 
693         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
694         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
695         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
696         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
697 
698         // Operations below differ between mask vectors and other vectors.
699         if (VT.getVectorElementType() == MVT::i1) {
700           setOperationAction(ISD::AND, VT, Custom);
701           setOperationAction(ISD::OR, VT, Custom);
702           setOperationAction(ISD::XOR, VT, Custom);
703           continue;
704         }
705 
706         // Use SPLAT_VECTOR to prevent type legalization from destroying the
707         // splats when type legalizing i64 scalar on RV32.
708         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
709         // improvements first.
710         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
711           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
712           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
713         }
714 
715         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
716         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
717 
718         setOperationAction(ISD::MLOAD, VT, Custom);
719         setOperationAction(ISD::MSTORE, VT, Custom);
720         setOperationAction(ISD::MGATHER, VT, Custom);
721         setOperationAction(ISD::MSCATTER, VT, Custom);
722         setOperationAction(ISD::ADD, VT, Custom);
723         setOperationAction(ISD::MUL, VT, Custom);
724         setOperationAction(ISD::SUB, VT, Custom);
725         setOperationAction(ISD::AND, VT, Custom);
726         setOperationAction(ISD::OR, VT, Custom);
727         setOperationAction(ISD::XOR, VT, Custom);
728         setOperationAction(ISD::SDIV, VT, Custom);
729         setOperationAction(ISD::SREM, VT, Custom);
730         setOperationAction(ISD::UDIV, VT, Custom);
731         setOperationAction(ISD::UREM, VT, Custom);
732         setOperationAction(ISD::SHL, VT, Custom);
733         setOperationAction(ISD::SRA, VT, Custom);
734         setOperationAction(ISD::SRL, VT, Custom);
735 
736         setOperationAction(ISD::SMIN, VT, Custom);
737         setOperationAction(ISD::SMAX, VT, Custom);
738         setOperationAction(ISD::UMIN, VT, Custom);
739         setOperationAction(ISD::UMAX, VT, Custom);
740         setOperationAction(ISD::ABS,  VT, Custom);
741 
742         setOperationAction(ISD::MULHS, VT, Custom);
743         setOperationAction(ISD::MULHU, VT, Custom);
744 
745         setOperationAction(ISD::VSELECT, VT, Custom);
746         setOperationAction(ISD::SELECT_CC, VT, Expand);
747 
748         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
749         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
750         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
751 
752         // Custom-lower reduction operations to set up the corresponding custom
753         // nodes' operands.
754         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
755         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
756         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
757         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
758         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
759 
760         for (unsigned VPOpc : IntegerVPOps)
761           setOperationAction(VPOpc, VT, Custom);
762       }
763 
764       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
765         if (!useRVVForFixedLengthVectorVT(VT))
766           continue;
767 
768         // By default everything must be expanded.
769         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
770           setOperationAction(Op, VT, Expand);
771         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
772           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
773           setTruncStoreAction(VT, OtherVT, Expand);
774         }
775 
776         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
777         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
778         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
779 
780         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
781         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
782         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
783         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
784 
785         setOperationAction(ISD::LOAD, VT, Custom);
786         setOperationAction(ISD::STORE, VT, Custom);
787         setOperationAction(ISD::MLOAD, VT, Custom);
788         setOperationAction(ISD::MSTORE, VT, Custom);
789         setOperationAction(ISD::MGATHER, VT, Custom);
790         setOperationAction(ISD::MSCATTER, VT, Custom);
791         setOperationAction(ISD::FADD, VT, Custom);
792         setOperationAction(ISD::FSUB, VT, Custom);
793         setOperationAction(ISD::FMUL, VT, Custom);
794         setOperationAction(ISD::FDIV, VT, Custom);
795         setOperationAction(ISD::FNEG, VT, Custom);
796         setOperationAction(ISD::FABS, VT, Custom);
797         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
798         setOperationAction(ISD::FSQRT, VT, Custom);
799         setOperationAction(ISD::FMA, VT, Custom);
800         setOperationAction(ISD::FMINNUM, VT, Custom);
801         setOperationAction(ISD::FMAXNUM, VT, Custom);
802 
803         setOperationAction(ISD::FP_ROUND, VT, Custom);
804         setOperationAction(ISD::FP_EXTEND, VT, Custom);
805 
806         for (auto CC : VFPCCToExpand)
807           setCondCodeAction(CC, VT, Expand);
808 
809         setOperationAction(ISD::VSELECT, VT, Custom);
810         setOperationAction(ISD::SELECT, VT, Custom);
811         setOperationAction(ISD::SELECT_CC, VT, Expand);
812 
813         setOperationAction(ISD::BITCAST, VT, Custom);
814 
815         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
816         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
817         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
818         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
819 
820         for (unsigned VPOpc : FloatingPointVPOps)
821           setOperationAction(VPOpc, VT, Custom);
822       }
823 
824       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
825       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
826       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
827       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
828       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
829       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
830       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
831       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
832     }
833   }
834 
835   // Function alignments.
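  // A 2-byte alignment suffices when the C extension provides 16-bit
  // instructions; otherwise instructions are 4 bytes.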
836   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
837   setMinFunctionAlignment(FunctionAlignment);
838   setPrefFunctionAlignment(FunctionAlignment);
839 
840   setMinimumJumpTableEntries(5);
841 
842   // Jumps are expensive, compared to logic
843   setJumpIsExpensive();
844 
845   // We can use any register for comparisons
846   setHasMultipleConditionRegisters();
847 
848   setTargetDAGCombine(ISD::AND);
849   setTargetDAGCombine(ISD::OR);
850   setTargetDAGCombine(ISD::XOR);
851   setTargetDAGCombine(ISD::ANY_EXTEND);
852   setTargetDAGCombine(ISD::ZERO_EXTEND);
853   if (Subtarget.hasStdExtV()) {
854     setTargetDAGCombine(ISD::FCOPYSIGN);
855     setTargetDAGCombine(ISD::MGATHER);
856     setTargetDAGCombine(ISD::MSCATTER);
857     setTargetDAGCombine(ISD::SRA);
858     setTargetDAGCombine(ISD::SRL);
859     setTargetDAGCombine(ISD::SHL);
860   }
861 }
862 
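// With RVV, vector setcc results are i1 mask vectors with the same element
// count; scalar setcc results are produced in a pointer-width GPR.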
863 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
864                                             LLVMContext &Context,
865                                             EVT VT) const {
866   if (!VT.isVector())
867     return getPointerTy(DL);
868   if (Subtarget.hasStdExtV() &&
869       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
870     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
871   return VT.changeVectorElementTypeToInteger();
872 }
873 
874 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
875   return Subtarget.getXLenVT();
876 }
877 
878 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
879                                              const CallInst &I,
880                                              MachineFunction &MF,
881                                              unsigned Intrinsic) const {
882   switch (Intrinsic) {
883   default:
884     return false;
885   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
886   case Intrinsic::riscv_masked_atomicrmw_add_i32:
887   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
888   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
889   case Intrinsic::riscv_masked_atomicrmw_max_i32:
890   case Intrinsic::riscv_masked_atomicrmw_min_i32:
891   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
892   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
893   case Intrinsic::riscv_masked_cmpxchg_i32: {
894     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
895     Info.opc = ISD::INTRINSIC_W_CHAIN;
896     Info.memVT = MVT::getVT(PtrTy->getElementType());
897     Info.ptrVal = I.getArgOperand(0);
898     Info.offset = 0;
899     Info.align = Align(4);
900     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
901                  MachineMemOperand::MOVolatile;
902     return true;
903   }
904   }
905 }
906 
907 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
908                                                 const AddrMode &AM, Type *Ty,
909                                                 unsigned AS,
910                                                 Instruction *I) const {
911   // No global is ever allowed as a base.
912   if (AM.BaseGV)
913     return false;
914 
915   // Require a 12-bit signed offset.
916   if (!isInt<12>(AM.BaseOffs))
917     return false;
918 
919   switch (AM.Scale) {
920   case 0: // "r+i" or just "i", depending on HasBaseReg.
921     break;
922   case 1:
923     if (!AM.HasBaseReg) // allow "r+i".
924       break;
925     return false; // disallow "r+r" or "r+r+i".
926   default:
927     return false;
928   }
929 
930   return true;
931 }
932 
933 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
934   return isInt<12>(Imm);
935 }
936 
937 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
938   return isInt<12>(Imm);
939 }
940 
941 // On RV32, 64-bit integers are split into their high and low parts and held
942 // in two different registers, so the trunc is free since the low register can
943 // just be used.
944 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
945   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
946     return false;
947   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
948   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
949   return (SrcBits == 64 && DestBits == 32);
950 }
951 
952 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
953   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
954       !SrcVT.isInteger() || !DstVT.isInteger())
955     return false;
956   unsigned SrcBits = SrcVT.getSizeInBits();
957   unsigned DestBits = DstVT.getSizeInBits();
958   return (SrcBits == 64 && DestBits == 32);
959 }
960 
961 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
962   // Zexts are free if they can be combined with a load.
963   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
964     EVT MemVT = LD->getMemoryVT();
965     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
966          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
967         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
968          LD->getExtensionType() == ISD::ZEXTLOAD))
969       return true;
970   }
971 
972   return TargetLowering::isZExtFree(Val, VT2);
973 }
974 
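// RV64 keeps i32 values sign-extended in registers, so sign-extending to i64
// is free while zero-extending generally needs extra instructions.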
975 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
976   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
977 }
978 
979 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
980   return Subtarget.hasStdExtZbb();
981 }
982 
983 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
984   return Subtarget.hasStdExtZbb();
985 }
986 
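// Only +0.0 is considered cheap to materialize (e.g. by moving x0 into an FP
// register); all other immediates, including -0.0, are rejected.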
987 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
988                                        bool ForCodeSize) const {
989   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
990     return false;
991   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
992     return false;
993   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
994     return false;
995   if (Imm.isNegZero())
996     return false;
997   return Imm.isZero();
998 }
999 
1000 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1001   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1002          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1003          (VT == MVT::f64 && Subtarget.hasStdExtD());
1004 }
1005 
1006 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1007                                                       CallingConv::ID CC,
1008                                                       EVT VT) const {
1009   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1010   // end up using a GPR but that will be decided based on ABI.
1011   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1012     return MVT::f32;
1013 
1014   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1015 }
1016 
1017 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1018                                                            CallingConv::ID CC,
1019                                                            EVT VT) const {
1020   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1021   // end up using a GPR but that will be decided based on ABI.
1022   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1023     return 1;
1024 
1025   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1026 }
1027 
1028 // Changes the condition code and swaps operands if necessary, so the SetCC
1029 // operation matches one of the comparisons supported directly by branches
1030 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1031 // with 1/-1.
1032 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1033                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1034   // Convert X > -1 to X >= 0.
1035   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1036     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1037     CC = ISD::SETGE;
1038     return;
1039   }
1040   // Convert X < 1 to 0 >= X.
1041   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1042     RHS = LHS;
1043     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1044     CC = ISD::SETGE;
1045     return;
1046   }
1047 
1048   switch (CC) {
1049   default:
1050     break;
1051   case ISD::SETGT:
1052   case ISD::SETLE:
1053   case ISD::SETUGT:
1054   case ISD::SETULE:
1055     CC = ISD::getSetCCSwappedOperands(CC);
1056     std::swap(LHS, RHS);
1057     break;
1058   }
1059 }
1060 
1061 // Return the RISC-V branch opcode that matches the given DAG integer
1062 // condition code. The CondCode must be one of those supported by the RISC-V
1063 // ISA (see translateSetCCForBranch).
1064 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1065   switch (CC) {
1066   default:
1067     llvm_unreachable("Unsupported CondCode");
1068   case ISD::SETEQ:
1069     return RISCV::BEQ;
1070   case ISD::SETNE:
1071     return RISCV::BNE;
1072   case ISD::SETLT:
1073     return RISCV::BLT;
1074   case ISD::SETGE:
1075     return RISCV::BGE;
1076   case ISD::SETULT:
1077     return RISCV::BLTU;
1078   case ISD::SETUGE:
1079     return RISCV::BGEU;
1080   }
1081 }
1082 
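// Map a scalable vector type to its register grouping (LMUL) from its minimum
// size in bits; mask (i1) vectors are grouped as if their elements were i8.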
1083 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1084   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1085   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1086   if (VT.getVectorElementType() == MVT::i1)
1087     KnownSize *= 8;
1088 
1089   switch (KnownSize) {
1090   default:
1091     llvm_unreachable("Invalid LMUL.");
1092   case 8:
1093     return RISCVII::VLMUL::LMUL_F8;
1094   case 16:
1095     return RISCVII::VLMUL::LMUL_F4;
1096   case 32:
1097     return RISCVII::VLMUL::LMUL_F2;
1098   case 64:
1099     return RISCVII::VLMUL::LMUL_1;
1100   case 128:
1101     return RISCVII::VLMUL::LMUL_2;
1102   case 256:
1103     return RISCVII::VLMUL::LMUL_4;
1104   case 512:
1105     return RISCVII::VLMUL::LMUL_8;
1106   }
1107 }
1108 
1109 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1110   switch (LMul) {
1111   default:
1112     llvm_unreachable("Invalid LMUL.");
1113   case RISCVII::VLMUL::LMUL_F8:
1114   case RISCVII::VLMUL::LMUL_F4:
1115   case RISCVII::VLMUL::LMUL_F2:
1116   case RISCVII::VLMUL::LMUL_1:
1117     return RISCV::VRRegClassID;
1118   case RISCVII::VLMUL::LMUL_2:
1119     return RISCV::VRM2RegClassID;
1120   case RISCVII::VLMUL::LMUL_4:
1121     return RISCV::VRM4RegClassID;
1122   case RISCVII::VLMUL::LMUL_8:
1123     return RISCV::VRM8RegClassID;
1124   }
1125 }
1126 
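// Return the sub_vrm1_*/sub_vrm2_*/sub_vrm4_* subregister index that selects
// the Index'th piece of type VT within a larger vector register group.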
1127 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1128   RISCVII::VLMUL LMUL = getLMUL(VT);
1129   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1130       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1131       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1132       LMUL == RISCVII::VLMUL::LMUL_1) {
1133     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1134                   "Unexpected subreg numbering");
1135     return RISCV::sub_vrm1_0 + Index;
1136   }
1137   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1138     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1139                   "Unexpected subreg numbering");
1140     return RISCV::sub_vrm2_0 + Index;
1141   }
1142   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1143     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1144                   "Unexpected subreg numbering");
1145     return RISCV::sub_vrm4_0 + Index;
1146   }
1147   llvm_unreachable("Invalid vector type.");
1148 }
1149 
1150 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1151   if (VT.getVectorElementType() == MVT::i1)
1152     return RISCV::VRRegClassID;
1153   return getRegClassIDForLMUL(getLMUL(VT));
1154 }
1155 
1156 // Attempt to decompose a subvector insert/extract between VecVT and
1157 // SubVecVT via subregister indices. Returns the subregister index that
1158 // can perform the subvector insert/extract with the given element index, as
1159 // well as the index corresponding to any leftover subvectors that must be
1160 // further inserted/extracted within the register class for SubVecVT.
1161 std::pair<unsigned, unsigned>
1162 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1163     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1164     const RISCVRegisterInfo *TRI) {
1165   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1166                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1167                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1168                 "Register classes not ordered");
1169   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1170   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1171   // Try to compose a subregister index that takes us from the incoming
1172   // LMUL>1 register class down to the outgoing one. At each step we halve
1173   // the LMUL:
1174   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1175   // Note that this is not guaranteed to find a subregister index, such as
1176   // when we are extracting from one VR type to another.
1177   unsigned SubRegIdx = RISCV::NoSubRegister;
1178   for (const unsigned RCID :
1179        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1180     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1181       VecVT = VecVT.getHalfNumVectorElementsVT();
1182       bool IsHi =
1183           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1184       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1185                                             getSubregIndexByMVT(VecVT, IsHi));
1186       if (IsHi)
1187         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1188     }
1189   return {SubRegIdx, InsertExtractIdx};
1190 }
1191 
1192 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1193 // stores for those types.
1194 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1195   return !Subtarget.useRVVForFixedLengthVectors() ||
1196          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1197 }
1198 
1199 static bool useRVVForFixedLengthVectorVT(MVT VT,
1200                                          const RISCVSubtarget &Subtarget) {
1201   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1202   if (!Subtarget.useRVVForFixedLengthVectors())
1203     return false;
1204 
1205   // We only support a set of vector types with a consistent maximum fixed size
1206   // across all supported vector element types to avoid legalization issues.
1207   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1208   // fixed-length vector type we support is 1024 bytes.
1209   if (VT.getFixedSizeInBits() > 1024 * 8)
1210     return false;
1211 
1212   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1213 
1214   // Don't use RVV for vectors we cannot scalarize if required.
1215   switch (VT.getVectorElementType().SimpleTy) {
1216   // i1 is supported but has different rules.
1217   default:
1218     return false;
1219   case MVT::i1:
1220     // Masks can only use a single register.
1221     if (VT.getVectorNumElements() > MinVLen)
1222       return false;
1223     MinVLen /= 8;
1224     break;
1225   case MVT::i8:
1226   case MVT::i16:
1227   case MVT::i32:
1228   case MVT::i64:
1229     break;
1230   case MVT::f16:
1231     if (!Subtarget.hasStdExtZfh())
1232       return false;
1233     break;
1234   case MVT::f32:
1235     if (!Subtarget.hasStdExtF())
1236       return false;
1237     break;
1238   case MVT::f64:
1239     if (!Subtarget.hasStdExtD())
1240       return false;
1241     break;
1242   }
1243 
1244   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1245   // Don't use RVV for types that don't fit.
1246   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1247     return false;
1248 
1249   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1250   // the base fixed length RVV support in place.
1251   if (!VT.isPow2VectorType())
1252     return false;
1253 
1254   return true;
1255 }
1256 
1257 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1258   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1259 }
1260 
1261 // Return the scalable container type used to hold a fixed-length vector of type VT.
1262 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1263                                             const RISCVSubtarget &Subtarget) {
1264   // This may be called before legal types are setup.
1265   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1266           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1267          "Expected legal fixed length vector!");
1268 
1269   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1270 
1271   MVT EltVT = VT.getVectorElementType();
1272   switch (EltVT.SimpleTy) {
1273   default:
1274     llvm_unreachable("unexpected element type for RVV container");
1275   case MVT::i1:
1276   case MVT::i8:
1277   case MVT::i16:
1278   case MVT::i32:
1279   case MVT::i64:
1280   case MVT::f16:
1281   case MVT::f32:
1282   case MVT::f64: {
1283     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1284     // narrower types, but we can't have a fractional LMUL with denominator less
1285     // than 64/SEW.
1286     unsigned NumElts =
1287         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1288     return MVT::getScalableVectorVT(EltVT, NumElts);
1289   }
1290   }
1291 }
1292 
1293 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1294                                             const RISCVSubtarget &Subtarget) {
1295   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1296                                           Subtarget);
1297 }
1298 
1299 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1300   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1301 }
1302 
1303 // Grow V to consume an entire RVV register.
1304 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1305                                        const RISCVSubtarget &Subtarget) {
1306   assert(VT.isScalableVector() &&
1307          "Expected to convert into a scalable vector!");
1308   assert(V.getValueType().isFixedLengthVector() &&
1309          "Expected a fixed length vector operand!");
1310   SDLoc DL(V);
1311   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1312   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1313 }
1314 
1315 // Shrink V so it's just big enough to maintain a VT's worth of data.
1316 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1317                                          const RISCVSubtarget &Subtarget) {
1318   assert(VT.isFixedLengthVector() &&
1319          "Expected to convert into a fixed length vector!");
1320   assert(V.getValueType().isScalableVector() &&
1321          "Expected a scalable vector operand!");
1322   SDLoc DL(V);
1323   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1324   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1325 }
1326 
1327 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1328 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1329 // the vector type that it is contained in.
1330 static std::pair<SDValue, SDValue>
1331 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1332                 const RISCVSubtarget &Subtarget) {
1333   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1334   MVT XLenVT = Subtarget.getXLenVT();
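  // For fixed-length vectors the VL operand is the exact element count; for
  // scalable vectors we pass X0, which is interpreted as VLMAX.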
1335   SDValue VL = VecVT.isFixedLengthVector()
1336                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1337                    : DAG.getRegister(RISCV::X0, XLenVT);
1338   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1339   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1340   return {Mask, VL};
1341 }
1342 
1343 // As above but assuming the given type is a scalable vector type.
1344 static std::pair<SDValue, SDValue>
1345 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1346                         const RISCVSubtarget &Subtarget) {
1347   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1348   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1349 }
1350 
1351 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1352 // of either are (currently) supported. This can get us into an infinite loop
1353 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1354 // as a ..., etc.
1355 // Until either (or both) of these can reliably lower any node, reporting that
1356 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1357 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1358 // which is not desirable.
1359 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1360     EVT VT, unsigned DefinedValues) const {
1361   return false;
1362 }
1363 
1364 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1365   // Only splats are currently supported.
1366   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1367     return true;
1368 
1369   return false;
1370 }
1371 
1372 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1373                                  const RISCVSubtarget &Subtarget) {
1374   MVT VT = Op.getSimpleValueType();
1375   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1376 
1377   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1378 
1379   SDLoc DL(Op);
1380   SDValue Mask, VL;
1381   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1382 
1383   unsigned Opc =
1384       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1385   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1386   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1387 }
1388 
1389 struct VIDSequence {
1390   int64_t Step;
1391   int64_t Addend;
1392 };
1393 
1394 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1395 // to the (non-zero) step S and start value X. This can then be lowered as the
1396 // RVV sequence (VID * S) + X, for example.
1397 // Note that this method will also match potentially unappealing index
1398 // sequences, like <i32 0, i32 50939494>; it is left to the caller to
1399 // determine whether this is worth generating code for.
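// For example, <i32 1, i32 3, i32 5, i32 7> yields Step=2 and Addend=1.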
1400 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1401   unsigned NumElts = Op.getNumOperands();
1402   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1403   if (!Op.getValueType().isInteger())
1404     return None;
1405 
1406   Optional<int64_t> SeqStep, SeqAddend;
1407   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1408   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1409   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1410     // Assume undef elements match the sequence; we just have to be careful
1411     // when interpolating across them.
1412     if (Op.getOperand(Idx).isUndef())
1413       continue;
1414     // The BUILD_VECTOR must be all constants.
1415     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1416       return None;
1417 
1418     uint64_t Val = Op.getConstantOperandVal(Idx) &
1419                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1420 
1421     if (PrevElt) {
1422       // Calculate the step since the last non-undef element, and ensure
1423       // it's consistent across the entire sequence.
1424       int64_t Diff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1425       // The difference must cleanly divide the element span.
1426       if (Diff % (Idx - PrevElt->second) != 0)
1427         return None;
1428       int64_t Step = Diff / (Idx - PrevElt->second);
1429       // A zero step indicates we're either not an index sequence, or we have a
1430       // fractional step. This must be handled by more complex pattern
1431       // recognition (undefs complicate things here).
1432       if (Step == 0)
1433         return None;
1434       if (!SeqStep)
1435         SeqStep = Step;
1436       else if (Step != SeqStep)
1437         return None;
1438     }
1439 
1440     // Record and/or check any addend.
1441     if (SeqStep) {
1442       int64_t Addend =
1443           SignExtend64(Val - (Idx * (uint64_t)*SeqStep), EltSizeInBits);
1444       if (!SeqAddend)
1445         SeqAddend = Addend;
1446       else if (SeqAddend != Addend)
1447         return None;
1448     }
1449 
1450     // Record this non-undef element for later.
1451     PrevElt = std::make_pair(Val, Idx);
1452   }
1453   // We need to have logged both a step and an addend for this to count as
1454   // a legal index sequence.
1455   if (!SeqStep || !SeqAddend)
1456     return None;
1457 
1458   return VIDSequence{*SeqStep, *SeqAddend};
1459 }
1460 
1461 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1462                                  const RISCVSubtarget &Subtarget) {
1463   MVT VT = Op.getSimpleValueType();
1464   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1465 
1466   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1467 
1468   SDLoc DL(Op);
1469   SDValue Mask, VL;
1470   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1471 
1472   MVT XLenVT = Subtarget.getXLenVT();
1473   unsigned NumElts = Op.getNumOperands();
1474 
1475   if (VT.getVectorElementType() == MVT::i1) {
1476     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1477       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1478       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1479     }
1480 
1481     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1482       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1483       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1484     }
1485 
1486     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1487     // scalar integer chunks whose bit-width depends on the number of mask
1488     // bits and XLEN.
1489     // First, determine the most appropriate scalar integer type to use. This
1490     // is at most XLenVT, but may be shrunk to a smaller vector element type
1491     // according to the size of the final vector - use i8 chunks rather than
1492     // XLenVT if we're producing a v8i1. This results in more consistent
1493     // codegen across RV32 and RV64.
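    // For example, a constant v16i1 mask is built as a single i16 element in a
    // v1i16 vector on both RV32 and RV64.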
1494     unsigned NumViaIntegerBits =
1495         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1496     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1497       // If we have to use more than one INSERT_VECTOR_ELT then this
1498       // optimization is likely to increase code size; avoid performing it in
1499       // such a case. We can use a load from a constant pool in this case.
1500       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1501         return SDValue();
1502       // Now we can create our integer vector type. Note that it may be larger
1503       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1504       MVT IntegerViaVecVT =
1505           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1506                            divideCeil(NumElts, NumViaIntegerBits));
1507 
1508       uint64_t Bits = 0;
1509       unsigned BitPos = 0, IntegerEltIdx = 0;
1510       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1511 
1512       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1513         // Once we accumulate enough bits to fill our scalar type, insert into
1514         // our vector and clear our accumulated data.
1515         if (I != 0 && I % NumViaIntegerBits == 0) {
1516           if (NumViaIntegerBits <= 32)
1517             Bits = SignExtend64(Bits, 32);
1518           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1519           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1520                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1521           Bits = 0;
1522           BitPos = 0;
1523           IntegerEltIdx++;
1524         }
1525         SDValue V = Op.getOperand(I);
1526         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1527         Bits |= ((uint64_t)BitValue << BitPos);
1528       }
1529 
1530       // Insert the (remaining) scalar value into position in our integer
1531       // vector type.
1532       if (NumViaIntegerBits <= 32)
1533         Bits = SignExtend64(Bits, 32);
1534       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1535       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1536                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1537 
1538       if (NumElts < NumViaIntegerBits) {
1539         // If we're producing a smaller vector than our minimum legal integer
1540         // type, bitcast to the equivalent (known-legal) mask type, and extract
1541         // our final mask.
1542         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1543         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1544         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1545                           DAG.getConstant(0, DL, XLenVT));
1546       } else {
1547         // Else we must have produced an integer type with the same size as the
1548         // mask type; bitcast for the final result.
1549         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1550         Vec = DAG.getBitcast(VT, Vec);
1551       }
1552 
1553       return Vec;
1554     }
1555 
1556     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1557     // vector type, we have a legal equivalently-sized i8 type, so we can use
1558     // that.
1559     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1560     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1561 
1562     SDValue WideVec;
1563     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1564       // For a splat, perform a scalar truncate before creating the wider
1565       // vector.
1566       assert(Splat.getValueType() == XLenVT &&
1567              "Unexpected type for i1 splat value");
1568       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1569                           DAG.getConstant(1, DL, XLenVT));
1570       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1571     } else {
1572       SmallVector<SDValue, 8> Ops(Op->op_values());
1573       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1574       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1575       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1576     }
1577 
1578     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1579   }
1580 
1581   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1582     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1583                                         : RISCVISD::VMV_V_X_VL;
1584     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1585     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1586   }
1587 
1588   // Try and match index sequences, which we can lower to the vid instruction
1589   // with optional modifications. An all-undef vector is matched by
1590   // getSplatValue, above.
1591   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1592     int64_t Step = SimpleVID->Step;
1593     int64_t Addend = SimpleVID->Addend;
1594     // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
1595     // threshold since it's the immediate value many RVV instructions accept.
1596     if (isInt<5>(Step) && isInt<5>(Addend)) {
1597       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1598       // Convert right out of the scalable type so we can use standard ISD
1599       // nodes for the rest of the computation. If we used scalable types with
1600       // these, we'd lose the fixed-length vector info and generate worse
1601       // vsetvli code.
1602       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1603       assert(Step != 0 && "Invalid step");
1604       bool Negate = false;
1605       if (Step != 1) {
1606         int64_t SplatStepVal = Step;
1607         unsigned Opcode = ISD::MUL;
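        // If the step is a power of two, use a shift instead of a multiply; for a
        // negative power-of-two step we shift by log2(|Step|) and subtract the
        // result from the splatted addend below.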
1608         if (isPowerOf2_64(std::abs(Step))) {
1609           Negate = Step < 0;
1610           Opcode = ISD::SHL;
1611           SplatStepVal = Log2_64(std::abs(Step));
1612         }
1613         SDValue SplatStep = DAG.getSplatVector(
1614             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1615         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1616       }
1617       if (Addend != 0 || Negate) {
1618         SDValue SplatAddend =
1619             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
1620         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
1621       }
1622       return VID;
1623     }
1624   }
1625 
1626   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1627   // when re-interpreted as a vector with a larger element type. For example,
1628   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1629   // could be instead splat as
1630   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1631   // TODO: This optimization could also work on non-constant splats, but it
1632   // would require bit-manipulation instructions to construct the splat value.
1633   SmallVector<SDValue> Sequence;
1634   unsigned EltBitSize = VT.getScalarSizeInBits();
1635   const auto *BV = cast<BuildVectorSDNode>(Op);
1636   if (VT.isInteger() && EltBitSize < 64 &&
1637       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1638       BV->getRepeatedSequence(Sequence) &&
1639       (Sequence.size() * EltBitSize) <= 64) {
1640     unsigned SeqLen = Sequence.size();
1641     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1642     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1643     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1644             ViaIntVT == MVT::i64) &&
1645            "Unexpected sequence type");
1646 
1647     unsigned EltIdx = 0;
1648     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1649     uint64_t SplatValue = 0;
1650     // Construct the amalgamated value which can be splatted as this larger
1651     // vector type.
1652     for (const auto &SeqV : Sequence) {
1653       if (!SeqV.isUndef())
1654         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1655                        << (EltIdx * EltBitSize));
1656       EltIdx++;
1657     }
1658 
1659     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1660     // achieve better constant materialization.
1661     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1662       SplatValue = SignExtend64(SplatValue, 32);
1663 
1664     // Since we can't introduce illegal i64 types at this stage, we can only
1665     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1666     // way we can use RVV instructions to splat.
1667     assert((ViaIntVT.bitsLE(XLenVT) ||
1668             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1669            "Unexpected bitcast sequence");
1670     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1671       SDValue ViaVL =
1672           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1673       MVT ViaContainerVT =
1674           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1675       SDValue Splat =
1676           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1677                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1678       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1679       return DAG.getBitcast(VT, Splat);
1680     }
1681   }
1682 
1683   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1684   // which constitute a large proportion of the elements. In such cases we can
1685   // splat a vector with the dominant element and make up the shortfall with
1686   // INSERT_VECTOR_ELTs.
1687   // Note that this includes vectors of 2 elements by association. The
1688   // upper-most element is the "dominant" one, allowing us to use a splat to
1689   // "insert" the upper element, and an insert of the lower element at position
1690   // 0, which improves codegen.
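  // For example, in <i32 2, i32 2, i32 7, i32 2> the value 2 is dominant, so we
  // splat 2 and then insert 7 at index 2.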
1691   SDValue DominantValue;
1692   unsigned MostCommonCount = 0;
1693   DenseMap<SDValue, unsigned> ValueCounts;
1694   unsigned NumUndefElts =
1695       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1696 
1697   for (SDValue V : Op->op_values()) {
1698     if (V.isUndef())
1699       continue;
1700 
1701     ValueCounts.insert(std::make_pair(V, 0));
1702     unsigned &Count = ValueCounts[V];
1703 
1704     // Is this value dominant? In case of a tie, prefer the highest element as
1705     // it's cheaper to insert near the beginning of a vector than it is at the
1706     // end.
1707     if (++Count >= MostCommonCount) {
1708       DominantValue = V;
1709       MostCommonCount = Count;
1710     }
1711   }
1712 
1713   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1714   unsigned NumDefElts = NumElts - NumUndefElts;
1715   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1716 
1717   // Don't perform this optimization when optimizing for size, since
1718   // materializing elements and inserting them tends to cause code bloat.
1719   if (!DAG.shouldOptForSize() &&
1720       ((MostCommonCount > DominantValueCountThreshold) ||
1721        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1722     // Start by splatting the most common element.
1723     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1724 
1725     DenseSet<SDValue> Processed{DominantValue};
1726     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1727     for (const auto &OpIdx : enumerate(Op->ops())) {
1728       const SDValue &V = OpIdx.value();
1729       if (V.isUndef() || !Processed.insert(V).second)
1730         continue;
1731       if (ValueCounts[V] == 1) {
1732         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1733                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1734       } else {
1735         // Blend in all instances of this value using a VSELECT, using a
1736         // mask where each bit signals whether that element is the one
1737         // we're after.
1738         SmallVector<SDValue> Ops;
1739         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1740           return DAG.getConstant(V == V1, DL, XLenVT);
1741         });
1742         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1743                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1744                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1745       }
1746     }
1747 
1748     return Vec;
1749   }
1750 
1751   return SDValue();
1752 }
1753 
1754 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1755                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1756   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1757     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1758     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1759     // If Hi is just Lo's sign bit replicated (i.e. a sign-extended 32-bit value),
1760     // lower this as a custom node to try to match RVV vector/scalar instructions.
1761     if ((LoC >> 31) == HiC)
1762       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1763   }
1764 
1765   // Fall back to a stack store and stride x0 vector load.
1766   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1767 }
1768 
1769 // Called by type legalization to handle splat of i64 on RV32.
1770 // FIXME: We can optimize this when the type has sign or zero bits in one
1771 // of the halves.
1772 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1773                                    SDValue VL, SelectionDAG &DAG) {
1774   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1775   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1776                            DAG.getConstant(0, DL, MVT::i32));
1777   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1778                            DAG.getConstant(1, DL, MVT::i32));
1779   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1780 }
1781 
1782 // This function lowers a splat of a scalar operand Scalar with the vector
1783 // length VL. It ensures the final sequence is type legal, which is useful when
1784 // lowering a splat after type legalization.
1785 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1786                                 SelectionDAG &DAG,
1787                                 const RISCVSubtarget &Subtarget) {
1788   if (VT.isFloatingPoint())
1789     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1790 
1791   MVT XLenVT = Subtarget.getXLenVT();
1792 
1793   // Simplest case is that the operand needs to be promoted to XLenVT.
1794   if (Scalar.getValueType().bitsLE(XLenVT)) {
1795     // If the operand is a constant, sign extend to increase our chances
1796     // of being able to use a .vi instruction. ANY_EXTEND would become a
1797     // zero extend and the simm5 check in isel would fail.
1798     // FIXME: Should we ignore the upper bits in isel instead?
1799     unsigned ExtOpc =
1800         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1801     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1802     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1803   }
1804 
1805   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1806          "Unexpected scalar for splat lowering!");
1807 
1808   // Otherwise use the more complicated splatting algorithm.
1809   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1810 }
1811 
1812 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1813                                    const RISCVSubtarget &Subtarget) {
1814   SDValue V1 = Op.getOperand(0);
1815   SDValue V2 = Op.getOperand(1);
1816   SDLoc DL(Op);
1817   MVT XLenVT = Subtarget.getXLenVT();
1818   MVT VT = Op.getSimpleValueType();
1819   unsigned NumElts = VT.getVectorNumElements();
1820   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1821 
1822   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1823 
1824   SDValue TrueMask, VL;
1825   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1826 
1827   if (SVN->isSplat()) {
1828     const int Lane = SVN->getSplatIndex();
1829     if (Lane >= 0) {
1830       MVT SVT = VT.getVectorElementType();
1831 
1832       // Turn splatted vector load into a strided load with an X0 stride.
1833       SDValue V = V1;
1834       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1835       // with undef.
1836       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1837       int Offset = Lane;
1838       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1839         int OpElements =
1840             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1841         V = V.getOperand(Offset / OpElements);
1842         Offset %= OpElements;
1843       }
1844 
1845       // We need to ensure the load isn't atomic or volatile.
1846       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1847         auto *Ld = cast<LoadSDNode>(V);
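        // Convert the splatted lane index into a byte offset from the load's
        // base pointer.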
1848         Offset *= SVT.getStoreSize();
1849         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1850                                                    TypeSize::Fixed(Offset), DL);
1851 
1852         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1853         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1854           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1855           SDValue IntID =
1856               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1857           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1858                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1859           SDValue NewLoad = DAG.getMemIntrinsicNode(
1860               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1861               DAG.getMachineFunction().getMachineMemOperand(
1862                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1863           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1864           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1865         }
1866 
1867         // Otherwise use a scalar load and splat. This will give the best
1868         // opportunity to fold a splat into the operation. ISel can turn it into
1869         // the x0 strided load if we aren't able to fold away the select.
1870         if (SVT.isFloatingPoint())
1871           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1872                           Ld->getPointerInfo().getWithOffset(Offset),
1873                           Ld->getOriginalAlign(),
1874                           Ld->getMemOperand()->getFlags());
1875         else
1876           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1877                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1878                              Ld->getOriginalAlign(),
1879                              Ld->getMemOperand()->getFlags());
1880         DAG.makeEquivalentMemoryOrdering(Ld, V);
1881 
1882         unsigned Opc =
1883             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1884         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1885         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1886       }
1887 
1888       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1889       assert(Lane < (int)NumElts && "Unexpected lane!");
1890       SDValue Gather =
1891           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1892                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1893       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1894     }
1895   }
1896 
1897   // Detect shuffles which can be re-expressed as vector selects; these are
1898   // shuffles in which each element in the destination is taken from an element
1899   // at the corresponding index in one of the two source vectors.
1900   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1901     int MaskIndex = MaskIdx.value();
1902     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1903   });
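  // For example, with two v4i32 sources the mask <0, 5, 2, 7> takes each result
  // lane from the same lane of one of the two sources, so it is a select.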
1904 
1905   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1906 
1907   SmallVector<SDValue> MaskVals;
1908   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1909   // merged with a second vrgather.
1910   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1911 
1912   // By default we preserve the original operand order, and use a mask to
1913   // select LHS as true and RHS as false. However, since RVV vector selects may
1914   // feature splats but only on the LHS, we may choose to invert our mask and
1915   // instead select between RHS and LHS.
1916   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1917   bool InvertMask = IsSelect == SwapOps;
1918 
1919   // Now construct the mask that will be used by the vselect or blended
1920   // vrgather operation. For vrgathers, construct the appropriate indices into
1921   // each vector.
1922   for (int MaskIndex : SVN->getMask()) {
1923     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1924     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1925     if (!IsSelect) {
1926       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
1927       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
1928                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
1929                                      : DAG.getUNDEF(XLenVT));
1930       GatherIndicesRHS.push_back(
1931           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
1932                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
1933     }
1934   }
1935 
1936   if (SwapOps) {
1937     std::swap(V1, V2);
1938     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1939   }
1940 
1941   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1942   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1943   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1944 
1945   if (IsSelect)
1946     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1947 
1948   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1949     // On such a large vector we're unable to use i8 as the index type.
1950     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1951     // may involve vector splitting if we're already at LMUL=8, or our
1952     // user-supplied maximum fixed-length LMUL.
1953     return SDValue();
1954   }
1955 
1956   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1957   MVT IndexVT = VT.changeTypeToInteger();
1958   // Since we can't introduce illegal index types at this stage, use i16 and
1959   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1960   // than XLenVT.
1961   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1962     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1963     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1964   }
1965 
1966   MVT IndexContainerVT =
1967       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1968 
1969   SDValue Gather;
1970   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1971   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1972   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1973     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1974   } else {
1975     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1976     LHSIndices =
1977         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1978 
1979     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1980     Gather =
1981         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1982   }
1983 
1984   // If a second vector operand is used by this shuffle, blend it in with an
1985   // additional vrgather.
1986   if (!V2.isUndef()) {
1987     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1988     SelectMask =
1989         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1990 
1991     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1992     RHSIndices =
1993         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1994 
1995     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1996     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1997     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1998                          Gather, VL);
1999   }
2000 
2001   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2002 }
2003 
2004 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2005                                      SDLoc DL, SelectionDAG &DAG,
2006                                      const RISCVSubtarget &Subtarget) {
2007   if (VT.isScalableVector())
2008     return DAG.getFPExtendOrRound(Op, DL, VT);
2009   assert(VT.isFixedLengthVector() &&
2010          "Unexpected value type for RVV FP extend/round lowering");
2011   SDValue Mask, VL;
2012   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2013   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2014                         ? RISCVISD::FP_EXTEND_VL
2015                         : RISCVISD::FP_ROUND_VL;
2016   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2017 }
2018 
2019 // While RVV has alignment restrictions, we should always be able to load as a
2020 // legal equivalently-sized byte-typed vector instead. This method is
2021 // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2022 // the load is already correctly-aligned, it returns SDValue().
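// For example, an underaligned load of a 128-bit vector is re-expressed as a
// load of sixteen i8 elements and the result is bitcast back to the original
// type.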
2023 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2024                                                     SelectionDAG &DAG) const {
2025   auto *Load = cast<LoadSDNode>(Op);
2026   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2027 
2028   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2029                                      Load->getMemoryVT(),
2030                                      *Load->getMemOperand()))
2031     return SDValue();
2032 
2033   SDLoc DL(Op);
2034   MVT VT = Op.getSimpleValueType();
2035   unsigned EltSizeBits = VT.getScalarSizeInBits();
2036   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2037          "Unexpected unaligned RVV load type");
2038   MVT NewVT =
2039       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2040   assert(NewVT.isValid() &&
2041          "Expecting equally-sized RVV vector types to be legal");
2042   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2043                           Load->getPointerInfo(), Load->getOriginalAlign(),
2044                           Load->getMemOperand()->getFlags());
2045   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2046 }
2047 
2048 // While RVV has alignment restrictions, we should always be able to store as a
2049 // legal equivalently-sized byte-typed vector instead. This method is
2050 // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
2051 // returns SDValue() if the store is already correctly aligned.
2052 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2053                                                      SelectionDAG &DAG) const {
2054   auto *Store = cast<StoreSDNode>(Op);
2055   assert(Store && Store->getValue().getValueType().isVector() &&
2056          "Expected vector store");
2057 
2058   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2059                                      Store->getMemoryVT(),
2060                                      *Store->getMemOperand()))
2061     return SDValue();
2062 
2063   SDLoc DL(Op);
2064   SDValue StoredVal = Store->getValue();
2065   MVT VT = StoredVal.getSimpleValueType();
2066   unsigned EltSizeBits = VT.getScalarSizeInBits();
2067   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2068          "Unexpected unaligned RVV store type");
2069   MVT NewVT =
2070       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2071   assert(NewVT.isValid() &&
2072          "Expecting equally-sized RVV vector types to be legal");
2073   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2074   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2075                       Store->getPointerInfo(), Store->getOriginalAlign(),
2076                       Store->getMemOperand()->getFlags());
2077 }
2078 
2079 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2080                                             SelectionDAG &DAG) const {
2081   switch (Op.getOpcode()) {
2082   default:
2083     report_fatal_error("unimplemented operand");
2084   case ISD::GlobalAddress:
2085     return lowerGlobalAddress(Op, DAG);
2086   case ISD::BlockAddress:
2087     return lowerBlockAddress(Op, DAG);
2088   case ISD::ConstantPool:
2089     return lowerConstantPool(Op, DAG);
2090   case ISD::JumpTable:
2091     return lowerJumpTable(Op, DAG);
2092   case ISD::GlobalTLSAddress:
2093     return lowerGlobalTLSAddress(Op, DAG);
2094   case ISD::SELECT:
2095     return lowerSELECT(Op, DAG);
2096   case ISD::BRCOND:
2097     return lowerBRCOND(Op, DAG);
2098   case ISD::VASTART:
2099     return lowerVASTART(Op, DAG);
2100   case ISD::FRAMEADDR:
2101     return lowerFRAMEADDR(Op, DAG);
2102   case ISD::RETURNADDR:
2103     return lowerRETURNADDR(Op, DAG);
2104   case ISD::SHL_PARTS:
2105     return lowerShiftLeftParts(Op, DAG);
2106   case ISD::SRA_PARTS:
2107     return lowerShiftRightParts(Op, DAG, true);
2108   case ISD::SRL_PARTS:
2109     return lowerShiftRightParts(Op, DAG, false);
2110   case ISD::BITCAST: {
2111     SDLoc DL(Op);
2112     EVT VT = Op.getValueType();
2113     SDValue Op0 = Op.getOperand(0);
2114     EVT Op0VT = Op0.getValueType();
2115     MVT XLenVT = Subtarget.getXLenVT();
2116     if (VT.isFixedLengthVector()) {
2117       // We can handle fixed length vector bitcasts with a simple replacement
2118       // in isel.
2119       if (Op0VT.isFixedLengthVector())
2120         return Op;
2121       // When bitcasting from scalar to fixed-length vector, insert the scalar
2122       // into a one-element vector of the result type, and perform a vector
2123       // bitcast.
2124       if (!Op0VT.isVector()) {
2125         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2126         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2127                                               DAG.getUNDEF(BVT), Op0,
2128                                               DAG.getConstant(0, DL, XLenVT)));
2129       }
2130       return SDValue();
2131     }
2132     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2133     // thus: bitcast the vector to a one-element vector type whose element type
2134     // is the same as the result type, and extract the first element.
2135     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2136       LLVMContext &Context = *DAG.getContext();
2137       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2138       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2139                          DAG.getConstant(0, DL, XLenVT));
2140     }
2141     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2142       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2143       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2144       return FPConv;
2145     }
2146     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2147         Subtarget.hasStdExtF()) {
2148       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2149       SDValue FPConv =
2150           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2151       return FPConv;
2152     }
2153     return SDValue();
2154   }
2155   case ISD::INTRINSIC_WO_CHAIN:
2156     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2157   case ISD::INTRINSIC_W_CHAIN:
2158     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2159   case ISD::BSWAP:
2160   case ISD::BITREVERSE: {
2161     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2162     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2163     MVT VT = Op.getSimpleValueType();
2164     SDLoc DL(Op);
2165     // Start with the maximum immediate value which is the bitwidth - 1.
2166     unsigned Imm = VT.getSizeInBits() - 1;
2167     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2168     if (Op.getOpcode() == ISD::BSWAP)
2169       Imm &= ~0x7U;
2170     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2171                        DAG.getConstant(Imm, DL, VT));
2172   }
2173   case ISD::FSHL:
2174   case ISD::FSHR: {
2175     MVT VT = Op.getSimpleValueType();
2176     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2177     SDLoc DL(Op);
2178     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2179       return Op;
2180     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2181     // use log2(XLen) bits. Mask the shift amount accordingly.
2182     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2183     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2184                                 DAG.getConstant(ShAmtWidth, DL, VT));
2185     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2186     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2187   }
2188   case ISD::TRUNCATE: {
2189     SDLoc DL(Op);
2190     MVT VT = Op.getSimpleValueType();
2191     // Only custom-lower vector truncates
2192     if (!VT.isVector())
2193       return Op;
2194 
2195     // Truncates to mask types are handled differently
2196     if (VT.getVectorElementType() == MVT::i1)
2197       return lowerVectorMaskTrunc(Op, DAG);
2198 
2199     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2200     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2201     // truncate by one power of two at a time.
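    // For example, an i64 -> i8 element truncate is emitted as three nodes:
    // i64 -> i32 -> i16 -> i8.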
2202     MVT DstEltVT = VT.getVectorElementType();
2203 
2204     SDValue Src = Op.getOperand(0);
2205     MVT SrcVT = Src.getSimpleValueType();
2206     MVT SrcEltVT = SrcVT.getVectorElementType();
2207 
2208     assert(DstEltVT.bitsLT(SrcEltVT) &&
2209            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2210            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2211            "Unexpected vector truncate lowering");
2212 
2213     MVT ContainerVT = SrcVT;
2214     if (SrcVT.isFixedLengthVector()) {
2215       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2216       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2217     }
2218 
2219     SDValue Result = Src;
2220     SDValue Mask, VL;
2221     std::tie(Mask, VL) =
2222         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2223     LLVMContext &Context = *DAG.getContext();
2224     const ElementCount Count = ContainerVT.getVectorElementCount();
2225     do {
2226       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2227       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2228       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2229                            Mask, VL);
2230     } while (SrcEltVT != DstEltVT);
2231 
2232     if (SrcVT.isFixedLengthVector())
2233       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2234 
2235     return Result;
2236   }
2237   case ISD::ANY_EXTEND:
2238   case ISD::ZERO_EXTEND:
2239     if (Op.getOperand(0).getValueType().isVector() &&
2240         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2241       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2242     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2243   case ISD::SIGN_EXTEND:
2244     if (Op.getOperand(0).getValueType().isVector() &&
2245         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2246       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2247     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2248   case ISD::SPLAT_VECTOR_PARTS:
2249     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2250   case ISD::INSERT_VECTOR_ELT:
2251     return lowerINSERT_VECTOR_ELT(Op, DAG);
2252   case ISD::EXTRACT_VECTOR_ELT:
2253     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2254   case ISD::VSCALE: {
2255     MVT VT = Op.getSimpleValueType();
2256     SDLoc DL(Op);
2257     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2258     // We define our scalable vector types for lmul=1 to use a 64 bit known
2259     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2260     // vscale as VLENB / 8.
2261     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2262     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2263       // We assume VLENB is a multiple of 8. We manually choose the best shift
2264       // here because SimplifyDemandedBits isn't always able to simplify it.
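      // For example, vscale * 4 becomes (VLENB >> 1) and vscale * 16 becomes
      // (VLENB << 1).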
2265       uint64_t Val = Op.getConstantOperandVal(0);
2266       if (isPowerOf2_64(Val)) {
2267         uint64_t Log2 = Log2_64(Val);
2268         if (Log2 < 3)
2269           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2270                              DAG.getConstant(3 - Log2, DL, VT));
2271         if (Log2 > 3)
2272           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2273                              DAG.getConstant(Log2 - 3, DL, VT));
2274         return VLENB;
2275       }
2276       // If the multiplier is a multiple of 8, scale it down to avoid needing
2277       // to shift the VLENB value.
2278       if ((Val % 8) == 0)
2279         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2280                            DAG.getConstant(Val / 8, DL, VT));
2281     }
2282 
2283     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2284                                  DAG.getConstant(3, DL, VT));
2285     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2286   }
2287   case ISD::FP_EXTEND: {
2288     // RVV can only do fp_extend to types double the size of the source. We
2289     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2290     // via f32.
2291     SDLoc DL(Op);
2292     MVT VT = Op.getSimpleValueType();
2293     SDValue Src = Op.getOperand(0);
2294     MVT SrcVT = Src.getSimpleValueType();
2295 
2296     // Prepare any fixed-length vector operands.
2297     MVT ContainerVT = VT;
2298     if (SrcVT.isFixedLengthVector()) {
2299       ContainerVT = getContainerForFixedLengthVector(VT);
2300       MVT SrcContainerVT =
2301           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2302       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2303     }
2304 
2305     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2306         SrcVT.getVectorElementType() != MVT::f16) {
2307       // For scalable vectors, we only need to close the gap between
2308       // vXf16->vXf64.
2309       if (!VT.isFixedLengthVector())
2310         return Op;
2311       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2312       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2313       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2314     }
2315 
2316     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2317     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2318     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2319         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2320 
2321     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2322                                            DL, DAG, Subtarget);
2323     if (VT.isFixedLengthVector())
2324       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2325     return Extend;
2326   }
2327   case ISD::FP_ROUND: {
2328     // RVV can only do fp_round to types half the size of the source. We
2329     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2330     // conversion instruction.
2331     SDLoc DL(Op);
2332     MVT VT = Op.getSimpleValueType();
2333     SDValue Src = Op.getOperand(0);
2334     MVT SrcVT = Src.getSimpleValueType();
2335 
2336     // Prepare any fixed-length vector operands.
2337     MVT ContainerVT = VT;
2338     if (VT.isFixedLengthVector()) {
2339       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2340       ContainerVT =
2341           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2342       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2343     }
2344 
2345     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2346         SrcVT.getVectorElementType() != MVT::f64) {
2347       // For scalable vectors, we only need to close the gap between
2348       // vXf64<->vXf16.
2349       if (!VT.isFixedLengthVector())
2350         return Op;
2351       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2352       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2353       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2354     }
2355 
2356     SDValue Mask, VL;
2357     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2358 
2359     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2360     SDValue IntermediateRound =
2361         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2362     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2363                                           DL, DAG, Subtarget);
2364 
2365     if (VT.isFixedLengthVector())
2366       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2367     return Round;
2368   }
2369   case ISD::FP_TO_SINT:
2370   case ISD::FP_TO_UINT:
2371   case ISD::SINT_TO_FP:
2372   case ISD::UINT_TO_FP: {
2373     // RVV can only do fp<->int conversions to types half/double the size of
2374     // the source. We custom-lower any conversions that would otherwise need
2375     // two hops into a sequence of two operations.
2376     MVT VT = Op.getSimpleValueType();
2377     if (!VT.isVector())
2378       return Op;
2379     SDLoc DL(Op);
2380     SDValue Src = Op.getOperand(0);
2381     MVT EltVT = VT.getVectorElementType();
2382     MVT SrcVT = Src.getSimpleValueType();
2383     MVT SrcEltVT = SrcVT.getVectorElementType();
2384     unsigned EltSize = EltVT.getSizeInBits();
2385     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2386     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2387            "Unexpected vector element types");
2388 
2389     bool IsInt2FP = SrcEltVT.isInteger();
2390     // Widening conversions
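    // For example, an i8 -> f32 conversion (a 4x widening) is done as an
    // i8 -> i32 extend followed by an i32 -> f32 conversion.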
2391     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2392       if (IsInt2FP) {
2393         // Do a regular integer sign/zero extension then convert to float.
2394         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2395                                       VT.getVectorElementCount());
2396         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2397                                  ? ISD::ZERO_EXTEND
2398                                  : ISD::SIGN_EXTEND;
2399         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2400         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2401       }
2402       // FP2Int
2403       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2404       // Do one doubling fp_extend then complete the operation by converting
2405       // to int.
2406       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2407       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2408       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2409     }
2410 
2411     // Narrowing conversions
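    // For example, an f64 -> i8 conversion is done as an f64 -> i32 fp_to_int
    // followed by an i32 -> i8 truncate.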
2412     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2413       if (IsInt2FP) {
2414         // One narrowing int_to_fp, then an fp_round.
2415         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2416         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2417         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2418         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2419       }
2420       // FP2Int
2421       // One narrowing fp_to_int, then truncate the integer. If the float value
2422       // doesn't fit in the final integer type, the result is poison anyway.
2423       MVT IVecVT =
2424           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2425                            VT.getVectorElementCount());
2426       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2427       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2428     }
2429 
2430     // Scalable vectors can exit here. Patterns will handle equally-sized
2431     // conversions as well as halving/doubling ones.
2432     if (!VT.isFixedLengthVector())
2433       return Op;
2434 
2435     // For fixed-length vectors we lower to a custom "VL" node.
2436     unsigned RVVOpc = 0;
2437     switch (Op.getOpcode()) {
2438     default:
2439       llvm_unreachable("Impossible opcode");
2440     case ISD::FP_TO_SINT:
2441       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2442       break;
2443     case ISD::FP_TO_UINT:
2444       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2445       break;
2446     case ISD::SINT_TO_FP:
2447       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2448       break;
2449     case ISD::UINT_TO_FP:
2450       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2451       break;
2452     }
2453 
2454     MVT ContainerVT, SrcContainerVT;
2455     // Derive the reference container type from the larger vector type.
2456     if (SrcEltSize > EltSize) {
2457       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2458       ContainerVT =
2459           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2460     } else {
2461       ContainerVT = getContainerForFixedLengthVector(VT);
2462       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2463     }
2464 
2465     SDValue Mask, VL;
2466     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2467 
2468     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2469     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2470     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2471   }
2472   case ISD::VECREDUCE_ADD:
2473   case ISD::VECREDUCE_UMAX:
2474   case ISD::VECREDUCE_SMAX:
2475   case ISD::VECREDUCE_UMIN:
2476   case ISD::VECREDUCE_SMIN:
2477     return lowerVECREDUCE(Op, DAG);
2478   case ISD::VECREDUCE_AND:
2479   case ISD::VECREDUCE_OR:
2480   case ISD::VECREDUCE_XOR:
2481     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2482       return lowerVectorMaskVECREDUCE(Op, DAG);
2483     return lowerVECREDUCE(Op, DAG);
2484   case ISD::VECREDUCE_FADD:
2485   case ISD::VECREDUCE_SEQ_FADD:
2486   case ISD::VECREDUCE_FMIN:
2487   case ISD::VECREDUCE_FMAX:
2488     return lowerFPVECREDUCE(Op, DAG);
2489   case ISD::INSERT_SUBVECTOR:
2490     return lowerINSERT_SUBVECTOR(Op, DAG);
2491   case ISD::EXTRACT_SUBVECTOR:
2492     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2493   case ISD::STEP_VECTOR:
2494     return lowerSTEP_VECTOR(Op, DAG);
2495   case ISD::VECTOR_REVERSE:
2496     return lowerVECTOR_REVERSE(Op, DAG);
2497   case ISD::BUILD_VECTOR:
2498     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2499   case ISD::SPLAT_VECTOR:
2500     if (Op.getValueType().getVectorElementType() == MVT::i1)
2501       return lowerVectorMaskSplat(Op, DAG);
2502     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2503   case ISD::VECTOR_SHUFFLE:
2504     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2505   case ISD::CONCAT_VECTORS: {
2506     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2507     // better than going through the stack, as the default expansion does.
2508     SDLoc DL(Op);
2509     MVT VT = Op.getSimpleValueType();
2510     unsigned NumOpElts =
2511         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2512     SDValue Vec = DAG.getUNDEF(VT);
2513     for (const auto &OpIdx : enumerate(Op->ops()))
2514       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2515                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2516     return Vec;
2517   }
2518   case ISD::LOAD:
2519     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2520       return V;
2521     if (Op.getValueType().isFixedLengthVector())
2522       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2523     return Op;
2524   case ISD::STORE:
2525     if (auto V = expandUnalignedRVVStore(Op, DAG))
2526       return V;
2527     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2528       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2529     return Op;
2530   case ISD::MLOAD:
2531     return lowerMLOAD(Op, DAG);
2532   case ISD::MSTORE:
2533     return lowerMSTORE(Op, DAG);
2534   case ISD::SETCC:
2535     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2536   case ISD::ADD:
2537     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2538   case ISD::SUB:
2539     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2540   case ISD::MUL:
2541     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2542   case ISD::MULHS:
2543     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2544   case ISD::MULHU:
2545     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2546   case ISD::AND:
2547     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2548                                               RISCVISD::AND_VL);
2549   case ISD::OR:
2550     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2551                                               RISCVISD::OR_VL);
2552   case ISD::XOR:
2553     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2554                                               RISCVISD::XOR_VL);
2555   case ISD::SDIV:
2556     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2557   case ISD::SREM:
2558     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2559   case ISD::UDIV:
2560     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2561   case ISD::UREM:
2562     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2563   case ISD::SHL:
2564   case ISD::SRA:
2565   case ISD::SRL:
2566     if (Op.getSimpleValueType().isFixedLengthVector())
2567       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2568     // This can be called for an i32 shift amount that needs to be promoted.
2569     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2570            "Unexpected custom legalisation");
2571     return SDValue();
2572   case ISD::FADD:
2573     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2574   case ISD::FSUB:
2575     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2576   case ISD::FMUL:
2577     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2578   case ISD::FDIV:
2579     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2580   case ISD::FNEG:
2581     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2582   case ISD::FABS:
2583     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2584   case ISD::FSQRT:
2585     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2586   case ISD::FMA:
2587     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2588   case ISD::SMIN:
2589     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2590   case ISD::SMAX:
2591     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2592   case ISD::UMIN:
2593     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2594   case ISD::UMAX:
2595     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2596   case ISD::FMINNUM:
2597     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2598   case ISD::FMAXNUM:
2599     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2600   case ISD::ABS:
2601     return lowerABS(Op, DAG);
2602   case ISD::VSELECT:
2603     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2604   case ISD::FCOPYSIGN:
2605     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2606   case ISD::MGATHER:
2607     return lowerMGATHER(Op, DAG);
2608   case ISD::MSCATTER:
2609     return lowerMSCATTER(Op, DAG);
2610   case ISD::FLT_ROUNDS_:
2611     return lowerGET_ROUNDING(Op, DAG);
2612   case ISD::SET_ROUNDING:
2613     return lowerSET_ROUNDING(Op, DAG);
2614   case ISD::VP_ADD:
2615     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2616   case ISD::VP_SUB:
2617     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2618   case ISD::VP_MUL:
2619     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2620   case ISD::VP_SDIV:
2621     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2622   case ISD::VP_UDIV:
2623     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2624   case ISD::VP_SREM:
2625     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2626   case ISD::VP_UREM:
2627     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2628   case ISD::VP_AND:
2629     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2630   case ISD::VP_OR:
2631     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2632   case ISD::VP_XOR:
2633     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2634   case ISD::VP_ASHR:
2635     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2636   case ISD::VP_LSHR:
2637     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2638   case ISD::VP_SHL:
2639     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2640   case ISD::VP_FADD:
2641     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2642   case ISD::VP_FSUB:
2643     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2644   case ISD::VP_FMUL:
2645     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2646   case ISD::VP_FDIV:
2647     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2648   }
2649 }
2650 
2651 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2652                              SelectionDAG &DAG, unsigned Flags) {
2653   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2654 }
2655 
2656 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2657                              SelectionDAG &DAG, unsigned Flags) {
2658   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2659                                    Flags);
2660 }
2661 
2662 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2663                              SelectionDAG &DAG, unsigned Flags) {
2664   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2665                                    N->getOffset(), Flags);
2666 }
2667 
2668 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2669                              SelectionDAG &DAG, unsigned Flags) {
2670   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2671 }
2672 
2673 template <class NodeTy>
2674 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2675                                      bool IsLocal) const {
2676   SDLoc DL(N);
2677   EVT Ty = getPointerTy(DAG.getDataLayout());
2678 
2679   if (isPositionIndependent()) {
2680     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2681     if (IsLocal)
2682       // Use PC-relative addressing to access the symbol. This generates the
2683       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2684       // %pcrel_lo(auipc)).
2685       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2686 
2687     // Use PC-relative addressing to access the GOT for this symbol, then load
2688     // the address from the GOT. This generates the pattern (PseudoLA sym),
2689     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2690     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2691   }
2692 
2693   switch (getTargetMachine().getCodeModel()) {
2694   default:
2695     report_fatal_error("Unsupported code model for lowering");
2696   case CodeModel::Small: {
2697     // Generate a sequence for accessing addresses within the first 2 GiB of
2698     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
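    // For example (roughly), using a0 as the destination register:
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)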
2699     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2700     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2701     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2702     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2703   }
2704   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range of
    // the address space. This generates the pattern (PseudoLLA sym), which
2707     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
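    // For example (roughly, label name illustrative):
    //   .Lpcrel_hi0: auipc a0, %pcrel_hi(sym)
    //                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)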
2708     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2709     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2710   }
2711   }
2712 }
2713 
2714 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2715                                                 SelectionDAG &DAG) const {
2716   SDLoc DL(Op);
2717   EVT Ty = Op.getValueType();
2718   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2719   int64_t Offset = N->getOffset();
2720   MVT XLenVT = Subtarget.getXLenVT();
2721 
2722   const GlobalValue *GV = N->getGlobal();
2723   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2724   SDValue Addr = getAddr(N, DAG, IsLocal);
2725 
2726   // In order to maximise the opportunity for common subexpression elimination,
2727   // emit a separate ADD node for the global address offset instead of folding
  // it into the global address node. Later peephole optimisations may choose
  // to fold it back in when profitable.
2730   if (Offset != 0)
2731     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2732                        DAG.getConstant(Offset, DL, XLenVT));
2733   return Addr;
2734 }
2735 
2736 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2737                                                SelectionDAG &DAG) const {
2738   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2739 
2740   return getAddr(N, DAG);
2741 }
2742 
2743 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2744                                                SelectionDAG &DAG) const {
2745   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2746 
2747   return getAddr(N, DAG);
2748 }
2749 
2750 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2751                                             SelectionDAG &DAG) const {
2752   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2753 
2754   return getAddr(N, DAG);
2755 }
2756 
2757 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2758                                               SelectionDAG &DAG,
2759                                               bool UseGOT) const {
2760   SDLoc DL(N);
2761   EVT Ty = getPointerTy(DAG.getDataLayout());
2762   const GlobalValue *GV = N->getGlobal();
2763   MVT XLenVT = Subtarget.getXLenVT();
2764 
2765   if (UseGOT) {
2766     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2767     // load the address from the GOT and add the thread pointer. This generates
2768     // the pattern (PseudoLA_TLS_IE sym), which expands to
2769     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2770     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2771     SDValue Load =
2772         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2773 
2774     // Add the thread pointer.
2775     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2776     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2777   }
2778 
2779   // Generate a sequence for accessing the address relative to the thread
2780   // pointer, with the appropriate adjustment for the thread pointer offset.
2781   // This generates the pattern
2782   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
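  // For example (roughly, using a0 as the destination register):
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)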
2783   SDValue AddrHi =
2784       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2785   SDValue AddrAdd =
2786       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2787   SDValue AddrLo =
2788       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2789 
2790   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2791   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2792   SDValue MNAdd = SDValue(
2793       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2794       0);
2795   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2796 }
2797 
2798 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2799                                                SelectionDAG &DAG) const {
2800   SDLoc DL(N);
2801   EVT Ty = getPointerTy(DAG.getDataLayout());
2802   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2803   const GlobalValue *GV = N->getGlobal();
2804 
2805   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2806   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2807   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2808   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2809   SDValue Load =
2810       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2811 
2812   // Prepare argument list to generate call.
2813   ArgListTy Args;
2814   ArgListEntry Entry;
2815   Entry.Node = Load;
2816   Entry.Ty = CallTy;
2817   Args.push_back(Entry);
2818 
2819   // Setup call to __tls_get_addr.
2820   TargetLowering::CallLoweringInfo CLI(DAG);
2821   CLI.setDebugLoc(DL)
2822       .setChain(DAG.getEntryNode())
2823       .setLibCallee(CallingConv::C, CallTy,
2824                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2825                     std::move(Args));
2826 
2827   return LowerCallTo(CLI).first;
2828 }
2829 
2830 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2831                                                    SelectionDAG &DAG) const {
2832   SDLoc DL(Op);
2833   EVT Ty = Op.getValueType();
2834   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2835   int64_t Offset = N->getOffset();
2836   MVT XLenVT = Subtarget.getXLenVT();
2837 
2838   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2839 
2840   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2841       CallingConv::GHC)
2842     report_fatal_error("In GHC calling convention TLS is not supported");
2843 
2844   SDValue Addr;
2845   switch (Model) {
2846   case TLSModel::LocalExec:
2847     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2848     break;
2849   case TLSModel::InitialExec:
2850     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2851     break;
2852   case TLSModel::LocalDynamic:
2853   case TLSModel::GeneralDynamic:
2854     Addr = getDynamicTLSAddr(N, DAG);
2855     break;
2856   }
2857 
2858   // In order to maximise the opportunity for common subexpression elimination,
2859   // emit a separate ADD node for the global address offset instead of folding
  // it into the global address node. Later peephole optimisations may choose
  // to fold it back in when profitable.
2862   if (Offset != 0)
2863     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2864                        DAG.getConstant(Offset, DL, XLenVT));
2865   return Addr;
2866 }
2867 
2868 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2869   SDValue CondV = Op.getOperand(0);
2870   SDValue TrueV = Op.getOperand(1);
2871   SDValue FalseV = Op.getOperand(2);
2872   SDLoc DL(Op);
2873   MVT VT = Op.getSimpleValueType();
2874   MVT XLenVT = Subtarget.getXLenVT();
2875 
2876   // Lower vector SELECTs to VSELECTs by splatting the condition.
2877   if (VT.isVector()) {
2878     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2879     SDValue CondSplat = VT.isScalableVector()
2880                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2881                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2882     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2883   }
2884 
2885   // If the result type is XLenVT and CondV is the output of a SETCC node
2886   // which also operated on XLenVT inputs, then merge the SETCC node into the
2887   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2888   // compare+branch instructions. i.e.:
2889   // (select (setcc lhs, rhs, cc), truev, falsev)
2890   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2891   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2892       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2893     SDValue LHS = CondV.getOperand(0);
2894     SDValue RHS = CondV.getOperand(1);
2895     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2896     ISD::CondCode CCVal = CC->get();
2897 
    // Special case for a select of two constants that differ by 1.
2899     // Normally this is done by DAGCombine, but if the select is introduced by
    // type legalization or op legalization, we miss it. Restricting to the
    // SETLT case for now because that is what signed saturating add/sub need.
2902     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2903     // but we would probably want to swap the true/false values if the condition
2904     // is SETGE/SETLE to avoid an XORI.
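    // For example, (select (setlt a, b), 4, 3) becomes (add (setlt a, b), 3),
    // and (select (setlt a, b), 3, 4) becomes (sub 4, (setlt a, b)).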
2905     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2906         CCVal == ISD::SETLT) {
2907       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2908       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2909       if (TrueVal - 1 == FalseVal)
2910         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2911       if (TrueVal + 1 == FalseVal)
2912         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2913     }
2914 
2915     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2916 
2917     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2918     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2919     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2920   }
2921 
2922   // Otherwise:
2923   // (select condv, truev, falsev)
2924   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2925   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2926   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2927 
2928   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2929 
2930   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2931 }
2932 
2933 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2934   SDValue CondV = Op.getOperand(1);
2935   SDLoc DL(Op);
2936   MVT XLenVT = Subtarget.getXLenVT();
2937 
2938   if (CondV.getOpcode() == ISD::SETCC &&
2939       CondV.getOperand(0).getValueType() == XLenVT) {
2940     SDValue LHS = CondV.getOperand(0);
2941     SDValue RHS = CondV.getOperand(1);
2942     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2943 
2944     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2945 
2946     SDValue TargetCC = DAG.getCondCode(CCVal);
2947     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2948                        LHS, RHS, TargetCC, Op.getOperand(2));
2949   }
2950 
2951   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2952                      CondV, DAG.getConstant(0, DL, XLenVT),
2953                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2954 }
2955 
2956 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2957   MachineFunction &MF = DAG.getMachineFunction();
2958   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2959 
2960   SDLoc DL(Op);
2961   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2962                                  getPointerTy(MF.getDataLayout()));
2963 
2964   // vastart just stores the address of the VarArgsFrameIndex slot into the
2965   // memory location argument.
2966   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2967   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2968                       MachinePointerInfo(SV));
2969 }
2970 
2971 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2972                                             SelectionDAG &DAG) const {
2973   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2974   MachineFunction &MF = DAG.getMachineFunction();
2975   MachineFrameInfo &MFI = MF.getFrameInfo();
2976   MFI.setFrameAddressIsTaken(true);
2977   Register FrameReg = RI.getFrameRegister(MF);
2978   int XLenInBytes = Subtarget.getXLen() / 8;
2979 
2980   EVT VT = Op.getValueType();
2981   SDLoc DL(Op);
2982   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2983   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2984   while (Depth--) {
2985     int Offset = -(XLenInBytes * 2);
2986     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2987                               DAG.getIntPtrConstant(Offset, DL));
2988     FrameAddr =
2989         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2990   }
2991   return FrameAddr;
2992 }
2993 
2994 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2995                                              SelectionDAG &DAG) const {
2996   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2997   MachineFunction &MF = DAG.getMachineFunction();
2998   MachineFrameInfo &MFI = MF.getFrameInfo();
2999   MFI.setReturnAddressIsTaken(true);
3000   MVT XLenVT = Subtarget.getXLenVT();
3001   int XLenInBytes = Subtarget.getXLen() / 8;
3002 
3003   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3004     return SDValue();
3005 
3006   EVT VT = Op.getValueType();
3007   SDLoc DL(Op);
3008   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3009   if (Depth) {
3010     int Off = -XLenInBytes;
3011     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3012     SDValue Offset = DAG.getConstant(Off, DL, VT);
3013     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3014                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3015                        MachinePointerInfo());
3016   }
3017 
3018   // Return the value of the return address register, marking it an implicit
3019   // live-in.
3020   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3021   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3022 }
3023 
3024 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3025                                                  SelectionDAG &DAG) const {
3026   SDLoc DL(Op);
3027   SDValue Lo = Op.getOperand(0);
3028   SDValue Hi = Op.getOperand(1);
3029   SDValue Shamt = Op.getOperand(2);
3030   EVT VT = Lo.getValueType();
3031 
3032   // if Shamt-XLEN < 0: // Shamt < XLEN
3033   //   Lo = Lo << Shamt
3034   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3035   // else:
3036   //   Lo = 0
3037   //   Hi = Lo << (Shamt-XLEN)
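  // For example, on RV32 (XLEN=32) with Shamt = 3 this computes:
  //   Lo = Lo << 3
  //   Hi = (Hi << 3) | (Lo >>u 29)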
3038 
3039   SDValue Zero = DAG.getConstant(0, DL, VT);
3040   SDValue One = DAG.getConstant(1, DL, VT);
3041   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3042   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3043   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3044   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3045 
3046   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3047   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3048   SDValue ShiftRightLo =
3049       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3050   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3051   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3052   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3053 
3054   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3055 
3056   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3057   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3058 
3059   SDValue Parts[2] = {Lo, Hi};
3060   return DAG.getMergeValues(Parts, DL);
3061 }
3062 
3063 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3064                                                   bool IsSRA) const {
3065   SDLoc DL(Op);
3066   SDValue Lo = Op.getOperand(0);
3067   SDValue Hi = Op.getOperand(1);
3068   SDValue Shamt = Op.getOperand(2);
3069   EVT VT = Lo.getValueType();
3070 
3071   // SRA expansion:
3072   //   if Shamt-XLEN < 0: // Shamt < XLEN
3073   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3074   //     Hi = Hi >>s Shamt
3075   //   else:
3076   //     Lo = Hi >>s (Shamt-XLEN);
3077   //     Hi = Hi >>s (XLEN-1)
3078   //
3079   // SRL expansion:
3080   //   if Shamt-XLEN < 0: // Shamt < XLEN
3081   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3082   //     Hi = Hi >>u Shamt
3083   //   else:
3084   //     Lo = Hi >>u (Shamt-XLEN);
3085   //     Hi = 0;
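  // For example, an SRL on RV32 (XLEN=32) with Shamt = 3 computes:
  //   Lo = (Lo >>u 3) | (Hi << 29)
  //   Hi = Hi >>u 3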
3086 
3087   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3088 
3089   SDValue Zero = DAG.getConstant(0, DL, VT);
3090   SDValue One = DAG.getConstant(1, DL, VT);
3091   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3092   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3093   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3094   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3095 
3096   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3097   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3098   SDValue ShiftLeftHi =
3099       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3100   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3101   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3102   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3103   SDValue HiFalse =
3104       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3105 
3106   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3107 
3108   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3109   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3110 
3111   SDValue Parts[2] = {Lo, Hi};
3112   return DAG.getMergeValues(Parts, DL);
3113 }
3114 
3115 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3116 // legal equivalently-sized i8 type, so we can use that as a go-between.
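// For example (roughly), splatting scalar x into a vXi1 mask becomes:
//   t   = and x, 1
//   v   = splat t into the equivalently-sized vXi8 type
//   res = setcc v, (splat 0), ne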
3117 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3118                                                   SelectionDAG &DAG) const {
3119   SDLoc DL(Op);
3120   MVT VT = Op.getSimpleValueType();
3121   SDValue SplatVal = Op.getOperand(0);
3122   // All-zeros or all-ones splats are handled specially.
3123   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3124     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3125     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3126   }
3127   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3128     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3129     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3130   }
3131   MVT XLenVT = Subtarget.getXLenVT();
3132   assert(SplatVal.getValueType() == XLenVT &&
3133          "Unexpected type for i1 splat value");
3134   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3135   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3136                          DAG.getConstant(1, DL, XLenVT));
3137   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3138   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3139   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3140 }
3141 
3142 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3143 // illegal (currently only vXi64 RV32).
3144 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3145 // them to SPLAT_VECTOR_I64
3146 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3147                                                      SelectionDAG &DAG) const {
3148   SDLoc DL(Op);
3149   MVT VecVT = Op.getSimpleValueType();
3150   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3151          "Unexpected SPLAT_VECTOR_PARTS lowering");
3152 
3153   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3154   SDValue Lo = Op.getOperand(0);
3155   SDValue Hi = Op.getOperand(1);
3156 
3157   if (VecVT.isFixedLengthVector()) {
3158     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3159     SDLoc DL(Op);
3160     SDValue Mask, VL;
3161     std::tie(Mask, VL) =
3162         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3163 
3164     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3165     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3166   }
3167 
3168   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3169     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3170     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign extension of Lo (i.e. every Hi bit
    // equals Lo's sign bit), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
3173     if ((LoC >> 31) == HiC)
3174       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3175   }
3176 
3177   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3178   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3179       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3180       Hi.getConstantOperandVal(1) == 31)
3181     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3182 
  // Fall back to using a stack store and a stride-x0 vector load. Use X0 as
  // the VL.
3184   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3185                      DAG.getRegister(RISCV::X0, MVT::i64));
3186 }
3187 
3188 // Custom-lower extensions from mask vectors by using a vselect either with 1
3189 // for zero/any-extension or -1 for sign-extension:
3190 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3191 // Note that any-extension is lowered identically to zero-extension.
3192 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3193                                                 int64_t ExtTrueVal) const {
3194   SDLoc DL(Op);
3195   MVT VecVT = Op.getSimpleValueType();
3196   SDValue Src = Op.getOperand(0);
3197   // Only custom-lower extensions from mask types
3198   assert(Src.getValueType().isVector() &&
3199          Src.getValueType().getVectorElementType() == MVT::i1);
3200 
3201   MVT XLenVT = Subtarget.getXLenVT();
3202   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3203   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3204 
3205   if (VecVT.isScalableVector()) {
    // Be careful not to introduce illegal scalar types at this stage, and be
    // careful about splatting constants too, since on RV32 vXi64 SPLAT_VECTOR
    // is illegal and must be expanded. Since we know that the constants are
3209     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3210     bool IsRV32E64 =
3211         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3212 
3213     if (!IsRV32E64) {
3214       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3215       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3216     } else {
3217       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3218       SplatTrueVal =
3219           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3220     }
3221 
3222     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3223   }
3224 
3225   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3226   MVT I1ContainerVT =
3227       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3228 
3229   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3230 
3231   SDValue Mask, VL;
3232   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3233 
3234   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3235   SplatTrueVal =
3236       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3237   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3238                                SplatTrueVal, SplatZero, VL);
3239 
3240   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3241 }
3242 
3243 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3244     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3245   MVT ExtVT = Op.getSimpleValueType();
3246   // Only custom-lower extensions from fixed-length vector types.
3247   if (!ExtVT.isFixedLengthVector())
3248     return Op;
3249   MVT VT = Op.getOperand(0).getSimpleValueType();
3250   // Grab the canonical container type for the extended type. Infer the smaller
3251   // type from that to ensure the same number of vector elements, as we know
3252   // the LMUL will be sufficient to hold the smaller type.
3253   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Construct the source container type manually to ensure the same number of
  // vector elements between source and dest.
3256   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3257                                      ContainerExtVT.getVectorElementCount());
3258 
3259   SDValue Op1 =
3260       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3261 
3262   SDLoc DL(Op);
3263   SDValue Mask, VL;
3264   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3265 
3266   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3267 
3268   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3269 }
3270 
3271 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3272 // setcc operation:
3273 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3274 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3275                                                   SelectionDAG &DAG) const {
3276   SDLoc DL(Op);
3277   EVT MaskVT = Op.getValueType();
3278   // Only expect to custom-lower truncations to mask types
3279   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3280          "Unexpected type for vector mask lowering");
3281   SDValue Src = Op.getOperand(0);
3282   MVT VecVT = Src.getSimpleValueType();
3283 
3284   // If this is a fixed vector, we need to convert it to a scalable vector.
3285   MVT ContainerVT = VecVT;
3286   if (VecVT.isFixedLengthVector()) {
3287     ContainerVT = getContainerForFixedLengthVector(VecVT);
3288     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3289   }
3290 
3291   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3292   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3293 
3294   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3295   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3296 
3297   if (VecVT.isScalableVector()) {
3298     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3299     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3300   }
3301 
3302   SDValue Mask, VL;
3303   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3304 
3305   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3306   SDValue Trunc =
3307       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3308   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3309                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3310   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3311 }
3312 
3313 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3314 // first position of a vector, and that vector is slid up to the insert index.
3315 // By limiting the active vector length to index+1 and merging with the
3316 // original vector (with an undisturbed tail policy for elements >= VL), we
3317 // achieve the desired result of leaving all elements untouched except the one
3318 // at VL-1, which is replaced with the desired value.
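// For example (roughly), inserting Val at index 2 of Vec becomes:
//   ValInVec = vmv.s.x undef, Val        ; Val placed at element 0
//   Result   = vslideup Vec, ValInVec, 2 ; with VL = 3 (index + 1)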
3319 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3320                                                     SelectionDAG &DAG) const {
3321   SDLoc DL(Op);
3322   MVT VecVT = Op.getSimpleValueType();
3323   SDValue Vec = Op.getOperand(0);
3324   SDValue Val = Op.getOperand(1);
3325   SDValue Idx = Op.getOperand(2);
3326 
3327   if (VecVT.getVectorElementType() == MVT::i1) {
3328     // FIXME: For now we just promote to an i8 vector and insert into that,
3329     // but this is probably not optimal.
3330     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3331     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3332     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3333     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3334   }
3335 
3336   MVT ContainerVT = VecVT;
3337   // If the operand is a fixed-length vector, convert to a scalable one.
3338   if (VecVT.isFixedLengthVector()) {
3339     ContainerVT = getContainerForFixedLengthVector(VecVT);
3340     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3341   }
3342 
3343   MVT XLenVT = Subtarget.getXLenVT();
3344 
3345   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3346   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3347   // Even i64-element vectors on RV32 can be lowered without scalar
3348   // legalization if the most-significant 32 bits of the value are not affected
3349   // by the sign-extension of the lower 32 bits.
3350   // TODO: We could also catch sign extensions of a 32-bit value.
3351   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3352     const auto *CVal = cast<ConstantSDNode>(Val);
3353     if (isInt<32>(CVal->getSExtValue())) {
3354       IsLegalInsert = true;
3355       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3356     }
3357   }
3358 
3359   SDValue Mask, VL;
3360   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3361 
3362   SDValue ValInVec;
3363 
3364   if (IsLegalInsert) {
3365     unsigned Opc =
3366         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3367     if (isNullConstant(Idx)) {
3368       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3369       if (!VecVT.isFixedLengthVector())
3370         return Vec;
3371       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3372     }
3373     ValInVec =
3374         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3375   } else {
3376     // On RV32, i64-element vectors must be specially handled to place the
3377     // value at element 0, by using two vslide1up instructions in sequence on
3378     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3379     // this.
3380     SDValue One = DAG.getConstant(1, DL, XLenVT);
3381     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3382     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3383     MVT I32ContainerVT =
3384         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3385     SDValue I32Mask =
3386         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3387     // Limit the active VL to two.
3388     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3390     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3391     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3392                            InsertI64VL);
3393     // First slide in the hi value, then the lo in underneath it.
3394     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3395                            ValHi, I32Mask, InsertI64VL);
3396     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3397                            ValLo, I32Mask, InsertI64VL);
3398     // Bitcast back to the right container type.
3399     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3400   }
3401 
3402   // Now that the value is in a vector, slide it into position.
3403   SDValue InsertVL =
3404       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3405   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3406                                 ValInVec, Idx, Mask, InsertVL);
3407   if (!VecVT.isFixedLengthVector())
3408     return Slideup;
3409   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3410 }
3411 
3412 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3413 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3414 // types this is done using VMV_X_S to allow us to glean information about the
3415 // sign bits of the result.
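// For example (roughly), extracting element 3 of an integer vector becomes:
//   Vec = vslidedown Vec, 3 ; with VL = 1
//   Elt = vmv.x.s Vec       ; then truncated to the element type if needed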
3416 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3417                                                      SelectionDAG &DAG) const {
3418   SDLoc DL(Op);
3419   SDValue Idx = Op.getOperand(1);
3420   SDValue Vec = Op.getOperand(0);
3421   EVT EltVT = Op.getValueType();
3422   MVT VecVT = Vec.getSimpleValueType();
3423   MVT XLenVT = Subtarget.getXLenVT();
3424 
3425   if (VecVT.getVectorElementType() == MVT::i1) {
3426     // FIXME: For now we just promote to an i8 vector and extract from that,
3427     // but this is probably not optimal.
3428     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3429     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3430     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3431   }
3432 
3433   // If this is a fixed vector, we need to convert it to a scalable vector.
3434   MVT ContainerVT = VecVT;
3435   if (VecVT.isFixedLengthVector()) {
3436     ContainerVT = getContainerForFixedLengthVector(VecVT);
3437     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3438   }
3439 
3440   // If the index is 0, the vector is already in the right position.
3441   if (!isNullConstant(Idx)) {
3442     // Use a VL of 1 to avoid processing more elements than we need.
3443     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3444     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3445     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3446     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3447                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3448   }
3449 
3450   if (!EltVT.isInteger()) {
3451     // Floating-point extracts are handled in TableGen.
3452     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3453                        DAG.getConstant(0, DL, XLenVT));
3454   }
3455 
3456   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3457   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3458 }
3459 
3460 // Some RVV intrinsics may claim that they want an integer operand to be
3461 // promoted or expanded.
3462 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3463                                           const RISCVSubtarget &Subtarget) {
3464   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3465           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3466          "Unexpected opcode");
3467 
3468   if (!Subtarget.hasStdExtV())
3469     return SDValue();
3470 
3471   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3472   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3473   SDLoc DL(Op);
3474 
3475   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3476       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3477   if (!II || !II->SplatOperand)
3478     return SDValue();
3479 
3480   unsigned SplatOp = II->SplatOperand + HasChain;
3481   assert(SplatOp < Op.getNumOperands());
3482 
3483   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3484   SDValue &ScalarOp = Operands[SplatOp];
3485   MVT OpVT = ScalarOp.getSimpleValueType();
3486   MVT XLenVT = Subtarget.getXLenVT();
3487 
3488   // If this isn't a scalar, or its type is XLenVT we're done.
3489   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3490     return SDValue();
3491 
3492   // Simplest case is that the operand needs to be promoted to XLenVT.
3493   if (OpVT.bitsLT(XLenVT)) {
3494     // If the operand is a constant, sign extend to increase our chances
3495     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
3497     // FIXME: Should we ignore the upper bits in isel instead?
3498     unsigned ExtOpc =
3499         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3500     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3501     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3502   }
3503 
3504   // Use the previous operand to get the vXi64 VT. The result might be a mask
3505   // VT for compares. Using the previous operand assumes that the previous
3506   // operand will never have a smaller element size than a scalar operand and
3507   // that a widening operation never uses SEW=64.
3508   // NOTE: If this fails the below assert, we can probably just find the
3509   // element count from any operand or result and use it to construct the VT.
3510   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3511   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3512 
3513   // The more complex case is when the scalar is larger than XLenVT.
3514   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3515          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3516 
3517   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3518   // on the instruction to sign-extend since SEW>XLEN.
3519   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3520     if (isInt<32>(CVal->getSExtValue())) {
3521       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3522       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3523     }
3524   }
3525 
3526   // We need to convert the scalar to a splat vector.
3527   // FIXME: Can we implicitly truncate the scalar if it is known to
3528   // be sign extended?
3529   // VL should be the last operand.
3530   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3531   assert(VL.getValueType() == XLenVT);
3532   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3533   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3534 }
3535 
3536 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3537                                                      SelectionDAG &DAG) const {
3538   unsigned IntNo = Op.getConstantOperandVal(0);
3539   SDLoc DL(Op);
3540   MVT XLenVT = Subtarget.getXLenVT();
3541 
3542   switch (IntNo) {
3543   default:
3544     break; // Don't custom lower most intrinsics.
3545   case Intrinsic::thread_pointer: {
3546     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3547     return DAG.getRegister(RISCV::X4, PtrVT);
3548   }
3549   case Intrinsic::riscv_orc_b:
3550     // Lower to the GORCI encoding for orc.b.
3551     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3552                        DAG.getConstant(7, DL, XLenVT));
3553   case Intrinsic::riscv_grev:
3554   case Intrinsic::riscv_gorc: {
3555     unsigned Opc =
3556         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3557     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3558   }
3559   case Intrinsic::riscv_shfl:
3560   case Intrinsic::riscv_unshfl: {
3561     unsigned Opc =
3562         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3563     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3564   }
3565   case Intrinsic::riscv_bcompress:
3566   case Intrinsic::riscv_bdecompress: {
3567     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3568                                                        : RISCVISD::BDECOMPRESS;
3569     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3570   }
3571   case Intrinsic::riscv_vmv_x_s:
3572     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3573     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3574                        Op.getOperand(1));
3575   case Intrinsic::riscv_vmv_v_x:
3576     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3577                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3578   case Intrinsic::riscv_vfmv_v_f:
3579     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3580                        Op.getOperand(1), Op.getOperand(2));
3581   case Intrinsic::riscv_vmv_s_x: {
3582     SDValue Scalar = Op.getOperand(2);
3583 
3584     if (Scalar.getValueType().bitsLE(XLenVT)) {
3585       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3586       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3587                          Op.getOperand(1), Scalar, Op.getOperand(3));
3588     }
3589 
3590     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3591 
3592     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat
    // containing the two values, assembled as in the sequence below. Next
    // we'll use vid.v and vmseq to build a mask with bit 0 set. Then we'll
    // use that mask to merge element 0 from our splat into the source vector.
3597     // FIXME: This is probably not the best way to do this, but it is
3598     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3599     // point.
3600     //   sw lo, (a0)
3601     //   sw hi, 4(a0)
3602     //   vlse vX, (a0)
3603     //
3604     //   vid.v      vVid
3605     //   vmseq.vx   mMask, vVid, 0
3606     //   vmerge.vvm vDest, vSrc, vVal, mMask
3607     MVT VT = Op.getSimpleValueType();
3608     SDValue Vec = Op.getOperand(1);
3609     SDValue VL = Op.getOperand(3);
3610 
3611     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3612     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3613                                       DAG.getConstant(0, DL, MVT::i32), VL);
3614 
3615     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3616     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3617     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3618     SDValue SelectCond =
3619         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3620                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3621     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3622                        Vec, VL);
3623   }
3624   case Intrinsic::riscv_vslide1up:
3625   case Intrinsic::riscv_vslide1down:
3626   case Intrinsic::riscv_vslide1up_mask:
3627   case Intrinsic::riscv_vslide1down_mask: {
3628     // We need to special case these when the scalar is larger than XLen.
3629     unsigned NumOps = Op.getNumOperands();
3630     bool IsMasked = NumOps == 6;
3631     unsigned OpOffset = IsMasked ? 1 : 0;
3632     SDValue Scalar = Op.getOperand(2 + OpOffset);
3633     if (Scalar.getValueType().bitsLE(XLenVT))
3634       break;
3635 
3636     // Splatting a sign extended constant is fine.
3637     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3638       if (isInt<32>(CVal->getSExtValue()))
3639         break;
3640 
3641     MVT VT = Op.getSimpleValueType();
3642     assert(VT.getVectorElementType() == MVT::i64 &&
3643            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3644 
3645     // Convert the vector source to the equivalent nxvXi32 vector.
3646     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3647     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3648 
3649     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3650                                    DAG.getConstant(0, DL, XLenVT));
3651     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3652                                    DAG.getConstant(1, DL, XLenVT));
3653 
3654     // Double the VL since we halved SEW.
3655     SDValue VL = Op.getOperand(NumOps - 1);
3656     SDValue I32VL =
3657         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3658 
3659     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3660     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3661 
3662     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3663     // instructions.
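    // For vslide1up (roughly): sliding in Hi first and then Lo leaves Lo in
    // the even i32 element and Hi in the odd one, so each pair reassembles
    // the original i64 value once the vector is bitcast back to SEW=64.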
3664     if (IntNo == Intrinsic::riscv_vslide1up ||
3665         IntNo == Intrinsic::riscv_vslide1up_mask) {
3666       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3667                         I32Mask, I32VL);
3668       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3669                         I32Mask, I32VL);
3670     } else {
3671       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3672                         I32Mask, I32VL);
3673       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3674                         I32Mask, I32VL);
3675     }
3676 
3677     // Convert back to nxvXi64.
3678     Vec = DAG.getBitcast(VT, Vec);
3679 
3680     if (!IsMasked)
3681       return Vec;
3682 
3683     // Apply mask after the operation.
3684     SDValue Mask = Op.getOperand(NumOps - 2);
3685     SDValue MaskedOff = Op.getOperand(1);
3686     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3687   }
3688   }
3689 
3690   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3691 }
3692 
3693 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3694                                                     SelectionDAG &DAG) const {
3695   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3696 }
3697 
3698 static MVT getLMUL1VT(MVT VT) {
3699   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3700          "Unexpected vector MVT");
3701   return MVT::getScalableVectorVT(
3702       VT.getVectorElementType(),
3703       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3704 }
3705 
3706 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3707   switch (ISDOpcode) {
3708   default:
3709     llvm_unreachable("Unhandled reduction");
3710   case ISD::VECREDUCE_ADD:
3711     return RISCVISD::VECREDUCE_ADD_VL;
3712   case ISD::VECREDUCE_UMAX:
3713     return RISCVISD::VECREDUCE_UMAX_VL;
3714   case ISD::VECREDUCE_SMAX:
3715     return RISCVISD::VECREDUCE_SMAX_VL;
3716   case ISD::VECREDUCE_UMIN:
3717     return RISCVISD::VECREDUCE_UMIN_VL;
3718   case ISD::VECREDUCE_SMIN:
3719     return RISCVISD::VECREDUCE_SMIN_VL;
3720   case ISD::VECREDUCE_AND:
3721     return RISCVISD::VECREDUCE_AND_VL;
3722   case ISD::VECREDUCE_OR:
3723     return RISCVISD::VECREDUCE_OR_VL;
3724   case ISD::VECREDUCE_XOR:
3725     return RISCVISD::VECREDUCE_XOR_VL;
3726   }
3727 }
3728 
3729 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3730                                                       SelectionDAG &DAG) const {
3731   SDLoc DL(Op);
3732   SDValue Vec = Op.getOperand(0);
3733   MVT VecVT = Vec.getSimpleValueType();
3734   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3735           Op.getOpcode() == ISD::VECREDUCE_OR ||
3736           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3737          "Unexpected reduction lowering");
3738 
3739   MVT XLenVT = Subtarget.getXLenVT();
3740   assert(Op.getValueType() == XLenVT &&
3741          "Expected reduction output to be legalized to XLenVT");
3742 
3743   MVT ContainerVT = VecVT;
3744   if (VecVT.isFixedLengthVector()) {
3745     ContainerVT = getContainerForFixedLengthVector(VecVT);
3746     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3747   }
3748 
3749   SDValue Mask, VL;
3750   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3751   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3752 
3753   switch (Op.getOpcode()) {
3754   default:
3755     llvm_unreachable("Unhandled reduction");
3756   case ISD::VECREDUCE_AND:
3757     // vpopc ~x == 0
3758     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3759     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3760     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3761   case ISD::VECREDUCE_OR:
3762     // vpopc x != 0
3763     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3764     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3765   case ISD::VECREDUCE_XOR: {
3766     // ((vpopc x) & 1) != 0
3767     SDValue One = DAG.getConstant(1, DL, XLenVT);
3768     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3769     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3770     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3771   }
3772   }
3773 }
3774 
3775 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3776                                             SelectionDAG &DAG) const {
3777   SDLoc DL(Op);
3778   SDValue Vec = Op.getOperand(0);
3779   EVT VecEVT = Vec.getValueType();
3780 
3781   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3782 
3783   // Due to ordering in legalize types we may have a vector type that needs to
3784   // be split. Do that manually so we can get down to a legal type.
3785   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3786          TargetLowering::TypeSplitVector) {
3787     SDValue Lo, Hi;
3788     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3789     VecEVT = Lo.getValueType();
3790     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3791   }
3792 
3793   // TODO: The type may need to be widened rather than split. Or widened before
3794   // it can be split.
3795   if (!isTypeLegal(VecEVT))
3796     return SDValue();
3797 
3798   MVT VecVT = VecEVT.getSimpleVT();
3799   MVT VecEltVT = VecVT.getVectorElementType();
3800   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3801 
3802   MVT ContainerVT = VecVT;
3803   if (VecVT.isFixedLengthVector()) {
3804     ContainerVT = getContainerForFixedLengthVector(VecVT);
3805     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3806   }
3807 
3808   MVT M1VT = getLMUL1VT(ContainerVT);
3809 
3810   SDValue Mask, VL;
3811   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3812 
3813   // FIXME: This is a VLMAX splat which might be too large and can prevent
3814   // vsetvli removal.
3815   SDValue NeutralElem =
3816       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3817   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3818   SDValue Reduction =
3819       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3820   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3821                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3822   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3823 }
3824 
3825 // Given a reduction op, this function returns the matching reduction opcode,
3826 // the vector SDValue and the scalar SDValue required to lower this to a
3827 // RISCVISD node.
3828 static std::tuple<unsigned, SDValue, SDValue>
3829 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3830   SDLoc DL(Op);
3831   auto Flags = Op->getFlags();
3832   unsigned Opcode = Op.getOpcode();
3833   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3834   switch (Opcode) {
3835   default:
3836     llvm_unreachable("Unhandled reduction");
3837   case ISD::VECREDUCE_FADD:
3838     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3839                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3840   case ISD::VECREDUCE_SEQ_FADD:
3841     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3842                            Op.getOperand(0));
3843   case ISD::VECREDUCE_FMIN:
3844     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3845                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3846   case ISD::VECREDUCE_FMAX:
3847     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3848                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3849   }
3850 }
3851 
3852 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3853                                               SelectionDAG &DAG) const {
3854   SDLoc DL(Op);
3855   MVT VecEltVT = Op.getSimpleValueType();
3856 
3857   unsigned RVVOpcode;
3858   SDValue VectorVal, ScalarVal;
3859   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3860       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3861   MVT VecVT = VectorVal.getSimpleValueType();
3862 
3863   MVT ContainerVT = VecVT;
3864   if (VecVT.isFixedLengthVector()) {
3865     ContainerVT = getContainerForFixedLengthVector(VecVT);
3866     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3867   }
3868 
3869   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3870 
3871   SDValue Mask, VL;
3872   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3873 
3874   // FIXME: This is a VLMAX splat which might be too large and can prevent
3875   // vsetvli removal.
3876   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3877   SDValue Reduction =
3878       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3879   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3880                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3881 }
3882 
3883 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3884                                                    SelectionDAG &DAG) const {
3885   SDValue Vec = Op.getOperand(0);
3886   SDValue SubVec = Op.getOperand(1);
3887   MVT VecVT = Vec.getSimpleValueType();
3888   MVT SubVecVT = SubVec.getSimpleValueType();
3889 
3890   SDLoc DL(Op);
3891   MVT XLenVT = Subtarget.getXLenVT();
3892   unsigned OrigIdx = Op.getConstantOperandVal(2);
3893   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3894 
3895   // We don't have the ability to slide mask vectors up indexed by their i1
3896   // elements; the smallest we can do is i8. Often we are able to bitcast to
3897   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3898   // into a scalable one, we might not necessarily have enough scalable
3899   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
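  // For example, inserting nxv16i1 into nxv64i1 at index 32 is instead done
  // as inserting nxv2i8 into nxv8i8 at index 4.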
3900   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3901       (OrigIdx != 0 || !Vec.isUndef())) {
3902     if (VecVT.getVectorMinNumElements() >= 8 &&
3903         SubVecVT.getVectorMinNumElements() >= 8) {
3904       assert(OrigIdx % 8 == 0 && "Invalid index");
3905       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3906              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3907              "Unexpected mask vector lowering");
3908       OrigIdx /= 8;
3909       SubVecVT =
3910           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3911                            SubVecVT.isScalableVector());
3912       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3913                                VecVT.isScalableVector());
3914       Vec = DAG.getBitcast(VecVT, Vec);
3915       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3916     } else {
3917       // We can't slide this mask vector up indexed by its i1 elements.
3918       // This poses a problem when we wish to insert a scalable vector which
3919       // can't be re-expressed as a larger type. Just choose the slow path and
3920       // extend to a larger type, then truncate back down.
3921       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3922       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3923       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3924       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3925       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3926                         Op.getOperand(2));
3927       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3928       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3929     }
3930   }
3931 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
3935   // register size. Therefore we must slide the vector group up the full
3936   // amount.
3937   if (SubVecVT.isFixedLengthVector()) {
3938     if (OrigIdx == 0 && Vec.isUndef())
3939       return Op;
3940     MVT ContainerVT = VecVT;
3941     if (VecVT.isFixedLengthVector()) {
3942       ContainerVT = getContainerForFixedLengthVector(VecVT);
3943       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3944     }
3945     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3946                          DAG.getUNDEF(ContainerVT), SubVec,
3947                          DAG.getConstant(0, DL, XLenVT));
3948     SDValue Mask =
3949         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3950     // Set the vector length to only the number of elements we care about. Note
3951     // that for slideup this includes the offset.
3952     SDValue VL =
3953         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3954     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3955     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3956                                   SubVec, SlideupAmt, Mask, VL);
3957     if (VecVT.isFixedLengthVector())
3958       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3959     return DAG.getBitcast(Op.getValueType(), Slideup);
3960   }
3961 
3962   unsigned SubRegIdx, RemIdx;
3963   std::tie(SubRegIdx, RemIdx) =
3964       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3965           VecVT, SubVecVT, OrigIdx, TRI);
3966 
3967   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3968   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3969                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3970                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3971 
3972   // 1. If the Idx has been completely eliminated and this subvector's size is
3973   // a vector register or a multiple thereof, or the surrounding elements are
3974   // undef, then this is a subvector insert which naturally aligns to a vector
3975   // register. These can easily be handled using subregister manipulation.
3976   // 2. If the subvector is smaller than a vector register, then the insertion
3977   // must preserve the undisturbed elements of the register. We do this by
3978   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3979   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3980   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3981   // LMUL=1 type back into the larger vector (resolving to another subregister
3982   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3983   // to avoid allocating a large register group to hold our subvector.
3984   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3985     return Op;
3986 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
3989   // (in our case undisturbed). This means we can set up a subvector insertion
3990   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3991   // size of the subvector.
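  // For example, with RemIdx == 2 and a subvector whose minimum element count
  // is 4, the slideup offset below is 2 * vscale and the VL is 6 * vscale.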
3992   MVT InterSubVT = VecVT;
3993   SDValue AlignedExtract = Vec;
3994   unsigned AlignedIdx = OrigIdx - RemIdx;
3995   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3996     InterSubVT = getLMUL1VT(VecVT);
3997     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
3999     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4000                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4001   }
4002 
4003   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4004   // For scalable vectors this must be further multiplied by vscale.
4005   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4006 
4007   SDValue Mask, VL;
4008   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4009 
4010   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4011   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4012   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4013   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4014 
4015   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4016                        DAG.getUNDEF(InterSubVT), SubVec,
4017                        DAG.getConstant(0, DL, XLenVT));
4018 
4019   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4020                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4021 
4022   // If required, insert this subvector back into the correct vector register.
4023   // This should resolve to an INSERT_SUBREG instruction.
4024   if (VecVT.bitsGT(InterSubVT))
4025     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4026                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4027 
4028   // We might have bitcast from a mask type: cast back to the original type if
4029   // required.
4030   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4031 }
4032 
4033 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4034                                                     SelectionDAG &DAG) const {
4035   SDValue Vec = Op.getOperand(0);
4036   MVT SubVecVT = Op.getSimpleValueType();
4037   MVT VecVT = Vec.getSimpleValueType();
4038 
4039   SDLoc DL(Op);
4040   MVT XLenVT = Subtarget.getXLenVT();
4041   unsigned OrigIdx = Op.getConstantOperandVal(1);
4042   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4043 
4044   // We don't have the ability to slide mask vectors down indexed by their i1
4045   // elements; the smallest we can do is i8. Often we are able to bitcast to
4046   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4047   // from a scalable one, we might not necessarily have enough scalable
4048   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4049   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4050     if (VecVT.getVectorMinNumElements() >= 8 &&
4051         SubVecVT.getVectorMinNumElements() >= 8) {
4052       assert(OrigIdx % 8 == 0 && "Invalid index");
4053       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4054              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4055              "Unexpected mask vector lowering");
4056       OrigIdx /= 8;
4057       SubVecVT =
4058           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4059                            SubVecVT.isScalableVector());
4060       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4061                                VecVT.isScalableVector());
4062       Vec = DAG.getBitcast(VecVT, Vec);
4063     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
4065       // This poses a problem when we wish to extract a scalable vector which
4066       // can't be re-expressed as a larger type. Just choose the slow path and
4067       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting a fixed-length
      // vector from a fixed-length one, where we could extract as i8 and
      // shift the correct element right to reach the desired subvector.
4071       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4072       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4073       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4074       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4075                         Op.getOperand(1));
4076       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4077       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4078     }
4079   }
4080 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
4084   // register size. Therefore we must slide the vector group down the full
4085   // amount.
4086   if (SubVecVT.isFixedLengthVector()) {
4087     // With an index of 0 this is a cast-like subvector, which can be performed
4088     // with subregister operations.
4089     if (OrigIdx == 0)
4090       return Op;
4091     MVT ContainerVT = VecVT;
4092     if (VecVT.isFixedLengthVector()) {
4093       ContainerVT = getContainerForFixedLengthVector(VecVT);
4094       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4095     }
4096     SDValue Mask =
4097         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4098     // Set the vector length to only the number of elements we care about. This
4099     // avoids sliding down elements we're going to discard straight away.
4100     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4101     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4102     SDValue Slidedown =
4103         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4104                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4105     // Now we can use a cast-like subvector extract to get the result.
4106     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4107                             DAG.getConstant(0, DL, XLenVT));
4108     return DAG.getBitcast(Op.getValueType(), Slidedown);
4109   }
4110 
4111   unsigned SubRegIdx, RemIdx;
4112   std::tie(SubRegIdx, RemIdx) =
4113       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4114           VecVT, SubVecVT, OrigIdx, TRI);
4115 
4116   // If the Idx has been completely eliminated then this is a subvector extract
4117   // which naturally aligns to a vector register. These can easily be handled
4118   // using subregister manipulation.
4119   if (RemIdx == 0)
4120     return Op;
4121 
4122   // Else we must shift our vector register directly to extract the subvector.
4123   // Do this using VSLIDEDOWN.
4124 
4125   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
4127   // instruction.
4128   MVT InterSubVT = VecVT;
4129   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4130     InterSubVT = getLMUL1VT(VecVT);
4131     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4132                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4133   }
4134 
4135   // Slide this vector register down by the desired number of elements in order
4136   // to place the desired subvector starting at element 0.
4137   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4138   // For scalable vectors this must be further multiplied by vscale.
4139   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4140 
4141   SDValue Mask, VL;
4142   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4143   SDValue Slidedown =
4144       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4145                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4146 
4147   // Now the vector is in the right position, extract our final subvector. This
4148   // should resolve to a COPY.
4149   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4150                           DAG.getConstant(0, DL, XLenVT));
4151 
4152   // We might have bitcast from a mask type: cast back to the original type if
4153   // required.
4154   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4155 }
4156 
4157 // Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
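// For example, a step_vector with step 4 becomes a vid.v followed by a vector
// shift left by 2, while a step of 3 becomes a vid.v followed by a vector
// multiply by a splat of 3.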
4159 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4160                                               SelectionDAG &DAG) const {
4161   SDLoc DL(Op);
4162   MVT VT = Op.getSimpleValueType();
4163   MVT XLenVT = Subtarget.getXLenVT();
4164   SDValue Mask, VL;
4165   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4166   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4167   uint64_t StepValImm = Op.getConstantOperandVal(0);
4168   if (StepValImm != 1) {
4169     if (isPowerOf2_64(StepValImm)) {
4170       SDValue StepVal =
4171           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4172                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4173       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4174     } else {
4175       SDValue StepVal = lowerScalarSplat(
4176           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4177           DL, DAG, Subtarget);
4178       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4179     }
4180   }
4181   return StepVec;
4182 }
4183 
4184 // Implement vector_reverse using vrgather.vv with indices determined by
4185 // subtracting the id of each element from (VLMAX-1). This will convert
4186 // the indices like so:
4187 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4188 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4189 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4190                                                  SelectionDAG &DAG) const {
4191   SDLoc DL(Op);
4192   MVT VecVT = Op.getSimpleValueType();
4193   unsigned EltSize = VecVT.getScalarSizeInBits();
4194   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4195 
4196   unsigned MaxVLMAX = 0;
4197   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4198   if (VectorBitsMax != 0)
4199     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4200 
4201   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4202   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4203 
4204   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4205   // to use vrgatherei16.vv.
4206   // TODO: It's also possible to use vrgatherei16.vv for other types to
4207   // decrease register width for the index calculation.
4208   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4210     // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that, after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
4213     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4214       SDValue Lo, Hi;
4215       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4216       EVT LoVT, HiVT;
4217       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4218       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4219       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4220       // Reassemble the low and high pieces reversed.
4221       // FIXME: This is a CONCAT_VECTORS.
4222       SDValue Res =
4223           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4224                       DAG.getIntPtrConstant(0, DL));
4225       return DAG.getNode(
4226           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4227           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4228     }
4229 
4230     // Just promote the int type to i16 which will double the LMUL.
4231     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4232     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4233   }
4234 
4235   MVT XLenVT = Subtarget.getXLenVT();
4236   SDValue Mask, VL;
4237   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4238 
4239   // Calculate VLMAX-1 for the desired SEW.
4240   unsigned MinElts = VecVT.getVectorMinNumElements();
4241   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4242                               DAG.getConstant(MinElts, DL, XLenVT));
4243   SDValue VLMinus1 =
4244       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4245 
4246   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4247   bool IsRV32E64 =
4248       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4249   SDValue SplatVL;
4250   if (!IsRV32E64)
4251     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4252   else
4253     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4254 
4255   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4256   SDValue Indices =
4257       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4258 
4259   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4260 }
4261 
4262 SDValue
4263 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4264                                                      SelectionDAG &DAG) const {
4265   SDLoc DL(Op);
4266   auto *Load = cast<LoadSDNode>(Op);
4267 
4268   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4269                                         Load->getMemoryVT(),
4270                                         *Load->getMemOperand()) &&
4271          "Expecting a correctly-aligned load");
4272 
4273   MVT VT = Op.getSimpleValueType();
4274   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4275 
4276   SDValue VL =
4277       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4278 
4279   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4280   SDValue NewLoad = DAG.getMemIntrinsicNode(
4281       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4282       Load->getMemoryVT(), Load->getMemOperand());
4283 
4284   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4285   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4286 }
4287 
4288 SDValue
4289 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4290                                                       SelectionDAG &DAG) const {
4291   SDLoc DL(Op);
4292   auto *Store = cast<StoreSDNode>(Op);
4293 
4294   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4295                                         Store->getMemoryVT(),
4296                                         *Store->getMemOperand()) &&
4297          "Expecting a correctly-aligned store");
4298 
4299   SDValue StoreVal = Store->getValue();
4300   MVT VT = StoreVal.getSimpleValueType();
4301 
  // If the size is less than a byte, we need to pad with zeros to make a byte.
4303   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4304     VT = MVT::v8i1;
4305     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4306                            DAG.getConstant(0, DL, VT), StoreVal,
4307                            DAG.getIntPtrConstant(0, DL));
4308   }
4309 
4310   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4311 
4312   SDValue VL =
4313       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4314 
4315   SDValue NewValue =
4316       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4317   return DAG.getMemIntrinsicNode(
4318       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4319       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4320       Store->getMemoryVT(), Store->getMemOperand());
4321 }
4322 
4323 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4324   auto *Load = cast<MaskedLoadSDNode>(Op);
4325 
4326   SDLoc DL(Op);
4327   MVT VT = Op.getSimpleValueType();
4328   MVT XLenVT = Subtarget.getXLenVT();
4329 
4330   SDValue Mask = Load->getMask();
4331   SDValue PassThru = Load->getPassThru();
4332   SDValue VL;
4333 
4334   MVT ContainerVT = VT;
4335   if (VT.isFixedLengthVector()) {
4336     ContainerVT = getContainerForFixedLengthVector(VT);
4337     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4338 
4339     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4340     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4341     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4342   } else
4343     VL = DAG.getRegister(RISCV::X0, XLenVT);
4344 
4345   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4346   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4347   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4348                    Load->getBasePtr(), Mask,  VL};
4349   SDValue Result =
4350       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4351                               Load->getMemoryVT(), Load->getMemOperand());
4352   SDValue Chain = Result.getValue(1);
4353 
4354   if (VT.isFixedLengthVector())
4355     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4356 
4357   return DAG.getMergeValues({Result, Chain}, DL);
4358 }
4359 
4360 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4361   auto *Store = cast<MaskedStoreSDNode>(Op);
4362 
4363   SDLoc DL(Op);
4364   SDValue Val = Store->getValue();
4365   SDValue Mask = Store->getMask();
4366   MVT VT = Val.getSimpleValueType();
4367   MVT XLenVT = Subtarget.getXLenVT();
4368   SDValue VL;
4369 
4370   MVT ContainerVT = VT;
4371   if (VT.isFixedLengthVector()) {
4372     ContainerVT = getContainerForFixedLengthVector(VT);
4373     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4374 
4375     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4376     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4377     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4378   } else
4379     VL = DAG.getRegister(RISCV::X0, XLenVT);
4380 
4381   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4382   return DAG.getMemIntrinsicNode(
4383       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4384       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4385       Store->getMemoryVT(), Store->getMemOperand());
4386 }
4387 
4388 SDValue
4389 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4390                                                       SelectionDAG &DAG) const {
4391   MVT InVT = Op.getOperand(0).getSimpleValueType();
4392   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4393 
4394   MVT VT = Op.getSimpleValueType();
4395 
4396   SDValue Op1 =
4397       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4398   SDValue Op2 =
4399       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4400 
4401   SDLoc DL(Op);
4402   SDValue VL =
4403       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4404 
4405   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4406   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4407 
4408   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4409                             Op.getOperand(2), Mask, VL);
4410 
4411   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4412 }
4413 
4414 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4415     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4416   MVT VT = Op.getSimpleValueType();
4417 
4418   if (VT.getVectorElementType() == MVT::i1)
4419     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4420 
4421   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4422 }
4423 
4424 SDValue
4425 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4426                                                       SelectionDAG &DAG) const {
4427   unsigned Opc;
4428   switch (Op.getOpcode()) {
4429   default: llvm_unreachable("Unexpected opcode!");
4430   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4431   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4432   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4433   }
4434 
4435   return lowerToScalableOp(Op, DAG, Opc);
4436 }
4437 
4438 // Lower vector ABS to smax(X, sub(0, X)).
4439 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4440   SDLoc DL(Op);
4441   MVT VT = Op.getSimpleValueType();
4442   SDValue X = Op.getOperand(0);
4443 
4444   assert(VT.isFixedLengthVector() && "Unexpected type");
4445 
4446   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4447   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4448 
4449   SDValue Mask, VL;
4450   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4451 
4452   SDValue SplatZero =
4453       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4454                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4455   SDValue NegX =
4456       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4457   SDValue Max =
4458       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4459 
4460   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4461 }
4462 
4463 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4464     SDValue Op, SelectionDAG &DAG) const {
4465   SDLoc DL(Op);
4466   MVT VT = Op.getSimpleValueType();
4467   SDValue Mag = Op.getOperand(0);
4468   SDValue Sign = Op.getOperand(1);
4469   assert(Mag.getValueType() == Sign.getValueType() &&
4470          "Can only handle COPYSIGN with matching types.");
4471 
4472   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4473   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4474   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4475 
4476   SDValue Mask, VL;
4477   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4478 
4479   SDValue CopySign =
4480       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4481 
4482   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4483 }
4484 
4485 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4486     SDValue Op, SelectionDAG &DAG) const {
4487   MVT VT = Op.getSimpleValueType();
4488   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4489 
4490   MVT I1ContainerVT =
4491       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4492 
4493   SDValue CC =
4494       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4495   SDValue Op1 =
4496       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4497   SDValue Op2 =
4498       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4499 
4500   SDLoc DL(Op);
4501   SDValue Mask, VL;
4502   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4503 
4504   SDValue Select =
4505       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4506 
4507   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4508 }
4509 
4510 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4511                                                unsigned NewOpc,
4512                                                bool HasMask) const {
4513   MVT VT = Op.getSimpleValueType();
4514   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4515 
4516   // Create list of operands by converting existing ones to scalable types.
4517   SmallVector<SDValue, 6> Ops;
4518   for (const SDValue &V : Op->op_values()) {
4519     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4520 
4521     // Pass through non-vector operands.
4522     if (!V.getValueType().isVector()) {
4523       Ops.push_back(V);
4524       continue;
4525     }
4526 
4527     // "cast" fixed length vector to a scalable vector.
4528     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4529            "Only fixed length vectors are supported!");
4530     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4531   }
4532 
4533   SDLoc DL(Op);
4534   SDValue Mask, VL;
4535   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4536   if (HasMask)
4537     Ops.push_back(Mask);
4538   Ops.push_back(VL);
4539 
4540   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4541   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4542 }
4543 
4544 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4545 // * Operands of each node are assumed to be in the same order.
4546 // * The EVL operand is promoted from i32 to i64 on RV64.
4547 // * Fixed-length vectors are converted to their scalable-vector container
4548 //   types.
4549 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4550                                        unsigned RISCVISDOpc) const {
4551   SDLoc DL(Op);
4552   MVT VT = Op.getSimpleValueType();
4553   SmallVector<SDValue, 4> Ops;
4554 
4555   for (const auto &OpIdx : enumerate(Op->ops())) {
4556     SDValue V = OpIdx.value();
4557     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4558     // Pass through operands which aren't fixed-length vectors.
4559     if (!V.getValueType().isFixedLengthVector()) {
4560       Ops.push_back(V);
4561       continue;
4562     }
4563     // "cast" fixed length vector to a scalable vector.
4564     MVT OpVT = V.getSimpleValueType();
4565     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4566     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4567            "Only fixed length vectors are supported!");
4568     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4569   }
4570 
4571   if (!VT.isFixedLengthVector())
4572     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4573 
4574   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4575 
4576   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4577 
4578   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4579 }
4580 
4581 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4582 // a RVV indexed load. The RVV indexed load instructions only support the
4583 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4584 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4585 // indexing is extended to the XLEN value type and scaled accordingly.
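// For example, a fixed-length v4i32 gather with v4i16 indices has both
// operands converted to their scalable container types and is emitted as a
// riscv_vluxei (or riscv_vluxei_mask) intrinsic, with the indices acting as
// the unsigned byte offsets described above.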
4586 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4587   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4588   SDLoc DL(Op);
4589 
4590   SDValue Index = MGN->getIndex();
4591   SDValue Mask = MGN->getMask();
4592   SDValue PassThru = MGN->getPassThru();
4593 
4594   MVT VT = Op.getSimpleValueType();
4595   MVT IndexVT = Index.getSimpleValueType();
4596   MVT XLenVT = Subtarget.getXLenVT();
4597 
4598   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4599          "Unexpected VTs!");
4600   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4601          "Unexpected pointer type");
4602   // Targets have to explicitly opt-in for extending vector loads.
4603   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4604          "Unexpected extending MGATHER");
4605 
4606   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4607   // the selection of the masked intrinsics doesn't do this for us.
4608   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4609 
4610   SDValue VL;
4611   MVT ContainerVT = VT;
4612   if (VT.isFixedLengthVector()) {
4613     // We need to use the larger of the result and index type to determine the
4614     // scalable type to use so we don't increase LMUL for any operand/result.
4615     if (VT.bitsGE(IndexVT)) {
4616       ContainerVT = getContainerForFixedLengthVector(VT);
4617       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4618                                  ContainerVT.getVectorElementCount());
4619     } else {
4620       IndexVT = getContainerForFixedLengthVector(IndexVT);
4621       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4622                                      IndexVT.getVectorElementCount());
4623     }
4624 
4625     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4626 
4627     if (!IsUnmasked) {
4628       MVT MaskVT =
4629           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4630       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4631       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4632     }
4633 
4634     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4635   } else
4636     VL = DAG.getRegister(RISCV::X0, XLenVT);
4637 
4638   unsigned IntID =
4639       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
4640   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4641                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4642   if (!IsUnmasked)
4643     Ops.push_back(PassThru);
4644   Ops.push_back(MGN->getBasePtr());
4645   Ops.push_back(Index);
4646   if (!IsUnmasked)
4647     Ops.push_back(Mask);
4648   Ops.push_back(VL);
4649 
4650   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4651   SDValue Result =
4652       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4653                               MGN->getMemoryVT(), MGN->getMemOperand());
4654   SDValue Chain = Result.getValue(1);
4655 
4656   if (VT.isFixedLengthVector())
4657     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4658 
4659   return DAG.getMergeValues({Result, Chain}, DL);
4660 }
4661 
4662 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4663 // a RVV indexed store. The RVV indexed store instructions only support the
4664 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4665 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4666 // indexing is extended to the XLEN value type and scaled accordingly.
4667 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4668                                            SelectionDAG &DAG) const {
4669   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4670   SDLoc DL(Op);
4671   SDValue Index = MSN->getIndex();
4672   SDValue Mask = MSN->getMask();
4673   SDValue Val = MSN->getValue();
4674 
4675   MVT VT = Val.getSimpleValueType();
4676   MVT IndexVT = Index.getSimpleValueType();
4677   MVT XLenVT = Subtarget.getXLenVT();
4678 
4679   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4680          "Unexpected VTs!");
4681   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4682          "Unexpected pointer type");
4683   // Targets have to explicitly opt-in for extending vector loads and
4684   // truncating vector stores.
4685   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4686 
4687   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4688   // the selection of the masked intrinsics doesn't do this for us.
4689   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4690 
4691   SDValue VL;
4692   if (VT.isFixedLengthVector()) {
4693     // We need to use the larger of the value and index type to determine the
4694     // scalable type to use so we don't increase LMUL for any operand/result.
4695     MVT ContainerVT;
4696     if (VT.bitsGE(IndexVT)) {
4697       ContainerVT = getContainerForFixedLengthVector(VT);
4698       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4699                                  ContainerVT.getVectorElementCount());
4700     } else {
4701       IndexVT = getContainerForFixedLengthVector(IndexVT);
4702       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4703                                      IndexVT.getVectorElementCount());
4704     }
4705 
4706     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4707     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4708 
4709     if (!IsUnmasked) {
4710       MVT MaskVT =
4711           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4712       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4713     }
4714 
4715     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4716   } else
4717     VL = DAG.getRegister(RISCV::X0, XLenVT);
4718 
4719   unsigned IntID =
4720       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4721   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4722                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4723   Ops.push_back(Val);
4724   Ops.push_back(MSN->getBasePtr());
4725   Ops.push_back(Index);
4726   if (!IsUnmasked)
4727     Ops.push_back(Mask);
4728   Ops.push_back(VL);
4729 
4730   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4731                                  MSN->getMemoryVT(), MSN->getMemOperand());
4732 }
4733 
4734 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4735                                                SelectionDAG &DAG) const {
4736   const MVT XLenVT = Subtarget.getXLenVT();
4737   SDLoc DL(Op);
4738   SDValue Chain = Op->getOperand(0);
4739   SDValue SysRegNo = DAG.getConstant(
4740       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4741   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4742   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4743 
  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
4748   static const int Table =
4749       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4750       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4751       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4752       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4753       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
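  // For example, if FRM holds RTZ (1), the shift below is 4 and the extracted
  // 4-bit field is int(RoundingMode::TowardZero), i.e. 0, which is the
  // FLT_ROUNDS encoding of round-toward-zero.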
4754 
4755   SDValue Shift =
4756       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4757   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4758                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4759   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4760                                DAG.getConstant(7, DL, XLenVT));
4761 
4762   return DAG.getMergeValues({Masked, Chain}, DL);
4763 }
4764 
4765 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4766                                                SelectionDAG &DAG) const {
4767   const MVT XLenVT = Subtarget.getXLenVT();
4768   SDLoc DL(Op);
4769   SDValue Chain = Op->getOperand(0);
4770   SDValue RMValue = Op->getOperand(1);
4771   SDValue SysRegNo = DAG.getConstant(
4772       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4773 
  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the C rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding RISCV mode.
4778   static const unsigned Table =
4779       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4780       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4781       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4782       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4783       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
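  // For example, an incoming FLT_ROUNDS value of 1 (NearestTiesToEven) shifts
  // the table right by 4 and extracts RISCVFPRndMode::RNE (0), which is then
  // written to FRM.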
4784 
4785   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4786                               DAG.getConstant(2, DL, XLenVT));
4787   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4788                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4789   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4790                         DAG.getConstant(0x7, DL, XLenVT));
4791   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4792                      RMValue);
4793 }
4794 
4795 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4796 // form of the given Opcode.
4797 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4798   switch (Opcode) {
4799   default:
4800     llvm_unreachable("Unexpected opcode");
4801   case ISD::SHL:
4802     return RISCVISD::SLLW;
4803   case ISD::SRA:
4804     return RISCVISD::SRAW;
4805   case ISD::SRL:
4806     return RISCVISD::SRLW;
4807   case ISD::SDIV:
4808     return RISCVISD::DIVW;
4809   case ISD::UDIV:
4810     return RISCVISD::DIVUW;
4811   case ISD::UREM:
4812     return RISCVISD::REMUW;
4813   case ISD::ROTL:
4814     return RISCVISD::ROLW;
4815   case ISD::ROTR:
4816     return RISCVISD::RORW;
4817   case RISCVISD::GREV:
4818     return RISCVISD::GREVW;
4819   case RISCVISD::GORC:
4820     return RISCVISD::GORCW;
4821   }
4822 }
4823 
4824 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
4825 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
4826 // otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
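// For example, an i32 SRL on RV64 becomes
// (trunc i32 (RISCVISD::SRLW (any_extend i64 X), (any_extend i64 Y))), from
// which a single SRLW instruction can be selected.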
4829 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4830                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4831   SDLoc DL(N);
4832   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4833   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4834   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4835   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4836   // ReplaceNodeResults requires we maintain the same type for the return value.
4837   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4838 }
4839 
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics in order to reduce the number of sign-extension instructions.
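// For example, an i32 ADD becomes
// (trunc i32 (sext_inreg i64 (add i64 (any_extend X), (any_extend Y)), i32)),
// which allows the add to be selected as ADDW without a separate sign
// extension of the result.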
4842 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4843   SDLoc DL(N);
4844   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4845   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4846   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4847   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4848                                DAG.getValueType(MVT::i32));
4849   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4850 }
4851 
4852 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4853                                              SmallVectorImpl<SDValue> &Results,
4854                                              SelectionDAG &DAG) const {
4855   SDLoc DL(N);
4856   switch (N->getOpcode()) {
4857   default:
4858     llvm_unreachable("Don't know how to custom type legalize this operation!");
4859   case ISD::STRICT_FP_TO_SINT:
4860   case ISD::STRICT_FP_TO_UINT:
4861   case ISD::FP_TO_SINT:
4862   case ISD::FP_TO_UINT: {
4863     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4864            "Unexpected custom legalisation");
4865     bool IsStrict = N->isStrictFPOpcode();
4866     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
4867                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
4868     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4869     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4870         TargetLowering::TypeSoftenFloat) {
4871       // FIXME: Support strict FP.
4872       if (IsStrict)
4873         return;
4874       if (!isTypeLegal(Op0.getValueType()))
4875         return;
4876       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
4877       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
4878       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4879       return;
4880     }
4881     // If the FP type needs to be softened, emit a library call using the 'si'
4882     // version. If we left it to default legalization we'd end up with 'di'. If
4883     // the FP type doesn't need to be softened just let generic type
4884     // legalization promote the result type.
4885     RTLIB::Libcall LC;
4886     if (IsSigned)
4887       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4888     else
4889       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4890     MakeLibCallOptions CallOptions;
4891     EVT OpVT = Op0.getValueType();
4892     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4893     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4894     SDValue Result;
4895     std::tie(Result, Chain) =
4896         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4897     Results.push_back(Result);
4898     if (IsStrict)
4899       Results.push_back(Chain);
4900     break;
4901   }
4902   case ISD::READCYCLECOUNTER: {
4903     assert(!Subtarget.is64Bit() &&
4904            "READCYCLECOUNTER only has custom type legalization on riscv32");
4905 
4906     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4907     SDValue RCW =
4908         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4909 
4910     Results.push_back(
4911         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4912     Results.push_back(RCW.getValue(2));
4913     break;
4914   }
4915   case ISD::MUL: {
4916     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4917     unsigned XLen = Subtarget.getXLen();
4918     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4919     if (Size > XLen) {
4920       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4921       SDValue LHS = N->getOperand(0);
4922       SDValue RHS = N->getOperand(1);
4923       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4924 
4925       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4926       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4927       // We need exactly one side to be unsigned.
4928       if (LHSIsU == RHSIsU)
4929         return;
4930 
4931       auto MakeMULPair = [&](SDValue S, SDValue U) {
4932         MVT XLenVT = Subtarget.getXLenVT();
4933         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4934         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4935         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4936         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4937         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4938       };
4939 
4940       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4941       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4942 
4943       // The other operand should be signed, but still prefer MULH when
4944       // possible.
4945       if (RHSIsU && LHSIsS && !RHSIsS)
4946         Results.push_back(MakeMULPair(LHS, RHS));
4947       else if (LHSIsU && RHSIsS && !LHSIsS)
4948         Results.push_back(MakeMULPair(RHS, LHS));
4949 
4950       return;
4951     }
4952     LLVM_FALLTHROUGH;
4953   }
4954   case ISD::ADD:
4955   case ISD::SUB:
4956     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4957            "Unexpected custom legalisation");
4958     if (N->getOperand(1).getOpcode() == ISD::Constant)
4959       return;
4960     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4961     break;
4962   case ISD::SHL:
4963   case ISD::SRA:
4964   case ISD::SRL:
4965     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4966            "Unexpected custom legalisation");
4967     if (N->getOperand(1).getOpcode() == ISD::Constant)
4968       return;
4969     Results.push_back(customLegalizeToWOp(N, DAG));
4970     break;
4971   case ISD::ROTL:
4972   case ISD::ROTR:
4973     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4974            "Unexpected custom legalisation");
4975     Results.push_back(customLegalizeToWOp(N, DAG));
4976     break;
4977   case ISD::CTTZ:
4978   case ISD::CTTZ_ZERO_UNDEF:
4979   case ISD::CTLZ:
4980   case ISD::CTLZ_ZERO_UNDEF: {
4981     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4982            "Unexpected custom legalisation");
4983 
4984     SDValue NewOp0 =
4985         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4986     bool IsCTZ =
4987         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4988     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4989     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4990     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4991     return;
4992   }
4993   case ISD::SDIV:
4994   case ISD::UDIV:
4995   case ISD::UREM: {
4996     MVT VT = N->getSimpleValueType(0);
4997     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4998            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4999            "Unexpected custom legalisation");
    // Don't promote division/remainder by a constant since we should expand
    // those to a multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
5003     if (N->getOperand(1).getOpcode() == ISD::Constant)
5004       return;
5005 
5006     // If the input is i32, use ANY_EXTEND since the W instructions don't read
5007     // the upper 32 bits. For other types we need to sign or zero extend
5008     // based on the opcode.
5009     unsigned ExtOpc = ISD::ANY_EXTEND;
5010     if (VT != MVT::i32)
5011       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5012                                            : ISD::ZERO_EXTEND;
5013 
5014     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5015     break;
5016   }
5017   case ISD::UADDO:
5018   case ISD::USUBO: {
5019     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5020            "Unexpected custom legalisation");
5021     bool IsAdd = N->getOpcode() == ISD::UADDO;
5022     // Create an ADDW or SUBW.
5023     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5024     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5025     SDValue Res =
5026         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5027     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5028                       DAG.getValueType(MVT::i32));
5029 
    // Sign extend the LHS and perform an unsigned compare with the ADDW/SUBW
    // result. Since the inputs are sign extended from i32, this is equivalent
    // to comparing the lower 32 bits.
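    // For UADDO, overflow occurred iff Res is (unsigned) less than the sign
    // extended LHS; for USUBO, iff it is greater.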
5033     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5034     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5035                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5036 
5037     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5038     Results.push_back(Overflow);
5039     return;
5040   }
5041   case ISD::UADDSAT:
5042   case ISD::USUBSAT: {
5043     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5044            "Unexpected custom legalisation");
5045     if (Subtarget.hasStdExtZbb()) {
5046       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5047       // sign extend allows overflow of the lower 32 bits to be detected on
5048       // the promoted size.
5049       SDValue LHS =
5050           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5051       SDValue RHS =
5052           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5053       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5054       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5055       return;
5056     }
5057 
5058     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5059     // promotion for UADDO/USUBO.
5060     Results.push_back(expandAddSubSat(N, DAG));
5061     return;
5062   }
5063   case ISD::BITCAST: {
5064     EVT VT = N->getValueType(0);
5065     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5066     SDValue Op0 = N->getOperand(0);
5067     EVT Op0VT = Op0.getValueType();
5068     MVT XLenVT = Subtarget.getXLenVT();
5069     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5070       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5071       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5072     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5073                Subtarget.hasStdExtF()) {
5074       SDValue FPConv =
5075           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5076       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5077     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5078                isTypeLegal(Op0VT)) {
5079       // Custom-legalize bitcasts from fixed-length vector types to illegal
5080       // scalar types in order to improve codegen. Bitcast the vector to a
5081       // one-element vector type whose element type is the same as the result
5082       // type, and extract the first element.
5083       LLVMContext &Context = *DAG.getContext();
5084       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
5085       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5086                                     DAG.getConstant(0, DL, XLenVT)));
5087     }
5088     break;
5089   }
5090   case RISCVISD::GREV:
5091   case RISCVISD::GORC: {
5092     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5093            "Unexpected custom legalisation");
5094     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is similar to customLegalizeToWOp, except that the second operand
    // is a constant shift amount rather than a value needing promotion; it
    // is simply any-extended to i64 alongside the first operand.
5098     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5099     SDValue NewOp0 =
5100         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5101     SDValue NewOp1 =
5102         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5103     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5104     // ReplaceNodeResults requires we maintain the same type for the return
5105     // value.
5106     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5107     break;
5108   }
5109   case RISCVISD::SHFL: {
5110     // There is no SHFLIW instruction, but we can just promote the operation.
5111     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5112            "Unexpected custom legalisation");
5113     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5114     SDValue NewOp0 =
5115         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5116     SDValue NewOp1 =
5117         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5118     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5119     // ReplaceNodeResults requires we maintain the same type for the return
5120     // value.
5121     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5122     break;
5123   }
5124   case ISD::BSWAP:
5125   case ISD::BITREVERSE: {
5126     MVT VT = N->getSimpleValueType(0);
5127     MVT XLenVT = Subtarget.getXLenVT();
5128     assert((VT == MVT::i8 || VT == MVT::i16 ||
5129             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5130            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5131     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5132     unsigned Imm = VT.getSizeInBits() - 1;
5133     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5134     if (N->getOpcode() == ISD::BSWAP)
5135       Imm &= ~0x7U;
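    // For example, an i32 BITREVERSE uses Imm == 31 (reverse all bits) while
    // an i32 BSWAP uses Imm == 24 (swap byte order only).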
5136     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5137     SDValue GREVI =
5138         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5139     // ReplaceNodeResults requires we maintain the same type for the return
5140     // value.
5141     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5142     break;
5143   }
5144   case ISD::FSHL:
5145   case ISD::FSHR: {
5146     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5147            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5148     SDValue NewOp0 =
5149         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5150     SDValue NewOp1 =
5151         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5152     SDValue NewOp2 =
5153         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5154     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5155     // Mask the shift amount to 5 bits.
5156     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5157                          DAG.getConstant(0x1f, DL, MVT::i64));
5158     unsigned Opc =
5159         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5160     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5161     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5162     break;
5163   }
5164   case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
    // first element.
5171     SDValue Vec = N->getOperand(0);
5172     SDValue Idx = N->getOperand(1);
5173 
5174     // The vector type hasn't been legalized yet so we can't issue target
5175     // specific nodes if it needs legalization.
5176     // FIXME: We would manually legalize if it's important.
5177     if (!isTypeLegal(Vec.getValueType()))
5178       return;
5179 
5180     MVT VecVT = Vec.getSimpleValueType();
5181 
5182     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5183            VecVT.getVectorElementType() == MVT::i64 &&
5184            "Unexpected EXTRACT_VECTOR_ELT legalization");
5185 
5186     // If this is a fixed vector, we need to convert it to a scalable vector.
5187     MVT ContainerVT = VecVT;
5188     if (VecVT.isFixedLengthVector()) {
5189       ContainerVT = getContainerForFixedLengthVector(VecVT);
5190       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5191     }
5192 
5193     MVT XLenVT = Subtarget.getXLenVT();
5194 
5195     // Use a VL of 1 to avoid processing more elements than we need.
5196     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5197     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5198     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5199 
5200     // Unless the index is known to be 0, we must slide the vector down to get
5201     // the desired element into index 0.
5202     if (!isNullConstant(Idx)) {
5203       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5204                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5205     }
5206 
5207     // Extract the lower XLEN bits of the correct vector element.
5208     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5209 
5210     // To extract the upper XLEN bits of the vector element, shift the first
5211     // element right by 32 bits and re-extract the lower XLEN bits.
5212     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5213                                      DAG.getConstant(32, DL, XLenVT), VL);
5214     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5215                                  ThirtyTwoV, Mask, VL);
5216 
5217     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5218 
5219     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5220     break;
5221   }
5222   case ISD::INTRINSIC_WO_CHAIN: {
5223     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5224     switch (IntNo) {
5225     default:
5226       llvm_unreachable(
5227           "Don't know how to custom type legalize this intrinsic!");
5228     case Intrinsic::riscv_orc_b: {
5229       // Lower to the GORCI encoding for orc.b with the operand extended.
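      // orc.b is equivalent to gorci with a shift amount of 7: each byte of
      // the result is all-ones if any bit of the corresponding input byte is
      // set.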
5230       SDValue NewOp =
5231           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5232       // If Zbp is enabled, use GORCIW which will sign extend the result.
5233       unsigned Opc =
5234           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5235       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5236                                 DAG.getConstant(7, DL, MVT::i64));
5237       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5238       return;
5239     }
5240     case Intrinsic::riscv_grev:
5241     case Intrinsic::riscv_gorc: {
5242       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5243              "Unexpected custom legalisation");
5244       SDValue NewOp1 =
5245           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5246       SDValue NewOp2 =
5247           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5248       unsigned Opc =
5249           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5250       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5251       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5252       break;
5253     }
5254     case Intrinsic::riscv_shfl:
5255     case Intrinsic::riscv_unshfl: {
5256       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5257              "Unexpected custom legalisation");
5258       SDValue NewOp1 =
5259           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5260       SDValue NewOp2 =
5261           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5262       unsigned Opc =
5263           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
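      // If the control word is a constant we can clear bit 4 (mask to 0xf) and
      // use the non-W node: with bit 4 clear the shuffle never crosses the
      // 32-bit halves, so the low 32 bits of the i64 result stay correct.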
5264       if (isa<ConstantSDNode>(N->getOperand(2))) {
5265         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5266                              DAG.getConstant(0xf, DL, MVT::i64));
5267         Opc =
5268             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5269       }
5270       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5271       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5272       break;
5273     }
5274     case Intrinsic::riscv_bcompress:
5275     case Intrinsic::riscv_bdecompress: {
5276       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5277              "Unexpected custom legalisation");
5278       SDValue NewOp1 =
5279           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5280       SDValue NewOp2 =
5281           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5282       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5283                          ? RISCVISD::BCOMPRESSW
5284                          : RISCVISD::BDECOMPRESSW;
5285       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5286       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5287       break;
5288     }
5289     case Intrinsic::riscv_vmv_x_s: {
5290       EVT VT = N->getValueType(0);
5291       MVT XLenVT = Subtarget.getXLenVT();
5292       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
5294         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5295                                       Subtarget.getXLenVT(), N->getOperand(1));
5296         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5297         return;
5298       }
5299 
5300       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5301              "Unexpected custom legalization");
5302 
5303       // We need to do the move in two steps.
5304       SDValue Vec = N->getOperand(1);
5305       MVT VecVT = Vec.getSimpleValueType();
5306 
5307       // First extract the lower XLEN bits of the element.
5308       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5309 
5310       // To extract the upper XLEN bits of the vector element, shift the first
5311       // element right by 32 bits and re-extract the lower XLEN bits.
5312       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5313       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5314       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5315       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5316                                        DAG.getConstant(32, DL, XLenVT), VL);
5317       SDValue LShr32 =
5318           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5319       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5320 
5321       Results.push_back(
5322           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5323       break;
5324     }
5325     }
5326     break;
5327   }
5328   case ISD::VECREDUCE_ADD:
5329   case ISD::VECREDUCE_AND:
5330   case ISD::VECREDUCE_OR:
5331   case ISD::VECREDUCE_XOR:
5332   case ISD::VECREDUCE_SMAX:
5333   case ISD::VECREDUCE_UMAX:
5334   case ISD::VECREDUCE_SMIN:
5335   case ISD::VECREDUCE_UMIN:
5336     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5337       Results.push_back(V);
5338     break;
5339   case ISD::FLT_ROUNDS_: {
5340     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5341     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5342     Results.push_back(Res.getValue(0));
5343     Results.push_back(Res.getValue(1));
5344     break;
5345   }
5346   }
5347 }
5348 
5349 // A structure to hold one of the bit-manipulation patterns below. Together, a
5350 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5351 //   (or (and (shl x, 1), 0xAAAAAAAA),
5352 //       (and (srl x, 1), 0x55555555))
5353 struct RISCVBitmanipPat {
5354   SDValue Op;
5355   unsigned ShAmt;
5356   bool IsSHL;
5357 
5358   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5359     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5360   }
5361 };
5362 
5363 // Matches patterns of the form
5364 //   (and (shl x, C2), (C1 << C2))
5365 //   (and (srl x, C2), C1)
5366 //   (shl (and x, C1), C2)
5367 //   (srl (and x, (C1 << C2)), C2)
5368 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5369 // The expected masks for each shift amount are specified in BitmanipMasks where
5370 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively (assuming the
// maximum possible XLen is 64).
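// For example, when called from matchGREVIPat below, (and (srl x, 1),
// 0x55555555) matches with Op == x and ShAmt == 1, since BitmanipMasks[0] is
// the 0x5555... mask.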
5374 static Optional<RISCVBitmanipPat>
5375 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5376   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5377          "Unexpected number of masks");
5378   Optional<uint64_t> Mask;
5379   // Optionally consume a mask around the shift operation.
5380   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5381     Mask = Op.getConstantOperandVal(1);
5382     Op = Op.getOperand(0);
5383   }
5384   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5385     return None;
5386   bool IsSHL = Op.getOpcode() == ISD::SHL;
5387 
5388   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5389     return None;
5390   uint64_t ShAmt = Op.getConstantOperandVal(1);
5391 
5392   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5393   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5394     return None;
5395   // If we don't have enough masks for 64 bit, then we must be trying to
5396   // match SHFL so we're only allowed to shift 1/4 of the width.
5397   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5398     return None;
5399 
5400   SDValue Src = Op.getOperand(0);
5401 
5402   // The expected mask is shifted left when the AND is found around SHL
5403   // patterns.
5404   //   ((x >> 1) & 0x55555555)
5405   //   ((x << 1) & 0xAAAAAAAA)
5406   bool SHLExpMask = IsSHL;
5407 
5408   if (!Mask) {
5409     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5410     // the mask is all ones: consume that now.
5411     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5412       Mask = Src.getConstantOperandVal(1);
5413       Src = Src.getOperand(0);
5414       // The expected mask is now in fact shifted left for SRL, so reverse the
5415       // decision.
5416       //   ((x & 0xAAAAAAAA) >> 1)
5417       //   ((x & 0x55555555) << 1)
5418       SHLExpMask = !SHLExpMask;
5419     } else {
5420       // Use a default shifted mask of all-ones if there's no AND, truncated
5421       // down to the expected width. This simplifies the logic later on.
5422       Mask = maskTrailingOnes<uint64_t>(Width);
5423       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5424     }
5425   }
5426 
5427   unsigned MaskIdx = Log2_32(ShAmt);
5428   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5429 
5430   if (SHLExpMask)
5431     ExpMask <<= ShAmt;
5432 
5433   if (Mask != ExpMask)
5434     return None;
5435 
5436   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5437 }
5438 
5439 // Matches any of the following bit-manipulation patterns:
5440 //   (and (shl x, 1), (0x55555555 << 1))
5441 //   (and (srl x, 1), 0x55555555)
5442 //   (shl (and x, 0x55555555), 1)
5443 //   (srl (and x, (0x55555555 << 1)), 1)
5444 // where the shift amount and mask may vary thus:
5445 //   [1]  = 0x55555555 / 0xAAAAAAAA
5446 //   [2]  = 0x33333333 / 0xCCCCCCCC
5447 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5448 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5450 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5451 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5452   // These are the unshifted masks which we use to match bit-manipulation
5453   // patterns. They may be shifted left in certain circumstances.
5454   static const uint64_t BitmanipMasks[] = {
5455       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5456       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5457 
5458   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5459 }
5460 
5461 // Match the following pattern as a GREVI(W) operation
5462 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
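// For example, with a shift amount of 1:
//   (or (and (shl x, 1), 0xAAAAAAAA), (and (srl x, 1), 0x55555555))
// becomes (grevi x, 1), which swaps adjacent bits.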
5463 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5464                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5466   EVT VT = Op.getValueType();
5467 
5468   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5469     auto LHS = matchGREVIPat(Op.getOperand(0));
5470     auto RHS = matchGREVIPat(Op.getOperand(1));
5471     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5472       SDLoc DL(Op);
5473       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5474                          DAG.getConstant(LHS->ShAmt, DL, VT));
5475     }
5476   }
5477   return SDValue();
5478 }
5479 
// Matches any of the following patterns as a GORCI(W) operation
5481 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5482 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5483 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5484 // Note that with the variant of 3.,
5485 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5486 // the inner pattern will first be matched as GREVI and then the outer
5487 // pattern will be matched to GORC via the first rule above.
5488 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5489 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5490                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5492   EVT VT = Op.getValueType();
5493 
5494   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5495     SDLoc DL(Op);
5496     SDValue Op0 = Op.getOperand(0);
5497     SDValue Op1 = Op.getOperand(1);
5498 
5499     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5500       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5501           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5502           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5503         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5504       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5505       if ((Reverse.getOpcode() == ISD::ROTL ||
5506            Reverse.getOpcode() == ISD::ROTR) &&
5507           Reverse.getOperand(0) == X &&
5508           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5509         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5510         if (RotAmt == (VT.getSizeInBits() / 2))
5511           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5512                              DAG.getConstant(RotAmt, DL, VT));
5513       }
5514       return SDValue();
5515     };
5516 
5517     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5518     if (SDValue V = MatchOROfReverse(Op0, Op1))
5519       return V;
5520     if (SDValue V = MatchOROfReverse(Op1, Op0))
5521       return V;
5522 
5523     // OR is commutable so canonicalize its OR operand to the left
5524     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5525       std::swap(Op0, Op1);
5526     if (Op0.getOpcode() != ISD::OR)
5527       return SDValue();
5528     SDValue OrOp0 = Op0.getOperand(0);
5529     SDValue OrOp1 = Op0.getOperand(1);
5530     auto LHS = matchGREVIPat(OrOp0);
5531     // OR is commutable so swap the operands and try again: x might have been
5532     // on the left
5533     if (!LHS) {
5534       std::swap(OrOp0, OrOp1);
5535       LHS = matchGREVIPat(OrOp0);
5536     }
5537     auto RHS = matchGREVIPat(Op1);
5538     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5539       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5540                          DAG.getConstant(LHS->ShAmt, DL, VT));
5541     }
5542   }
5543   return SDValue();
5544 }
5545 
5546 // Matches any of the following bit-manipulation patterns:
5547 //   (and (shl x, 1), (0x22222222 << 1))
5548 //   (and (srl x, 1), 0x22222222)
5549 //   (shl (and x, 0x22222222), 1)
5550 //   (srl (and x, (0x22222222 << 1)), 1)
5551 // where the shift amount and mask may vary thus:
5552 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
5554 //   [4]  = 0x00F000F0 / 0x0F000F00
5555 //   [8]  = 0x0000FF00 / 0x00FF0000
5556 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5557 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5558   // These are the unshifted masks which we use to match bit-manipulation
5559   // patterns. They may be shifted left in certain circumstances.
5560   static const uint64_t BitmanipMasks[] = {
5561       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5562       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5563 
5564   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5565 }
5566 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
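// where SHFL_AND is an (and x, Mask) term keeping the bits that the shuffle
// leaves stationary (e.g. 0x99999999 for a shift amount of 1).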
5568 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5569                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5571   EVT VT = Op.getValueType();
5572 
5573   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5574     return SDValue();
5575 
5576   SDValue Op0 = Op.getOperand(0);
5577   SDValue Op1 = Op.getOperand(1);
5578 
5579   // Or is commutable so canonicalize the second OR to the LHS.
5580   if (Op0.getOpcode() != ISD::OR)
5581     std::swap(Op0, Op1);
5582   if (Op0.getOpcode() != ISD::OR)
5583     return SDValue();
5584 
5585   // We found an inner OR, so our operands are the operands of the inner OR
5586   // and the other operand of the outer OR.
5587   SDValue A = Op0.getOperand(0);
5588   SDValue B = Op0.getOperand(1);
5589   SDValue C = Op1;
5590 
5591   auto Match1 = matchSHFLPat(A);
5592   auto Match2 = matchSHFLPat(B);
5593 
5594   // If neither matched, we failed.
5595   if (!Match1 && !Match2)
5596     return SDValue();
5597 
  // We had at least one match. If one failed, try the remaining C operand.
5599   if (!Match1) {
5600     std::swap(A, C);
5601     Match1 = matchSHFLPat(A);
5602     if (!Match1)
5603       return SDValue();
5604   } else if (!Match2) {
5605     std::swap(B, C);
5606     Match2 = matchSHFLPat(B);
5607     if (!Match2)
5608       return SDValue();
5609   }
5610   assert(Match1 && Match2);
5611 
5612   // Make sure our matches pair up.
5613   if (!Match1->formsPairWith(*Match2))
5614     return SDValue();
5615 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
5618   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5619       C.getOperand(0) != Match1->Op)
5620     return SDValue();
5621 
5622   uint64_t Mask = C.getConstantOperandVal(1);
5623 
5624   static const uint64_t BitmanipMasks[] = {
5625       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5626       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5627   };
5628 
5629   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5630   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5631   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5632 
5633   if (Mask != ExpMask)
5634     return SDValue();
5635 
5636   SDLoc DL(Op);
5637   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5638                      DAG.getConstant(Match1->ShAmt, DL, VT));
5639 }
5640 
5641 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5642 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
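// For example, (grevi (grevi x, 1), 2) -> (grevi x, 3),
// (grevi (grevi x, 1), 1) -> x, and (gorci (gorci x, 1), 1) -> (gorci x, 1).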
5645 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5646   SDValue Src = N->getOperand(0);
5647 
5648   if (Src.getOpcode() != N->getOpcode())
5649     return SDValue();
5650 
5651   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5652       !isa<ConstantSDNode>(Src.getOperand(1)))
5653     return SDValue();
5654 
5655   unsigned ShAmt1 = N->getConstantOperandVal(1);
5656   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5657   Src = Src.getOperand(0);
5658 
5659   unsigned CombinedShAmt;
5660   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5661     CombinedShAmt = ShAmt1 | ShAmt2;
5662   else
5663     CombinedShAmt = ShAmt1 ^ ShAmt2;
5664 
5665   if (CombinedShAmt == 0)
5666     return Src;
5667 
5668   SDLoc DL(N);
5669   return DAG.getNode(
5670       N->getOpcode(), DL, N->getValueType(0), Src,
5671       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5672 }
5673 
5674 // Combine a constant select operand into its use:
5675 //
5676 // (and (select_cc lhs, rhs, cc, -1, c), x)
5677 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5678 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5679 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5680 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5681 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
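// The constant arm (-1 for AND, 0 for OR/XOR) is the identity of the outer
// operation, so on that side of the select OtherOp passes through unchanged.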
5682 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5683                                      SelectionDAG &DAG, bool AllOnes) {
5684   EVT VT = N->getValueType(0);
5685 
5686   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5687     return SDValue();
5688 
5689   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5690     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5691   };
5692 
5693   bool SwapSelectOps;
5694   SDValue TrueVal = Slct.getOperand(3);
5695   SDValue FalseVal = Slct.getOperand(4);
5696   SDValue NonConstantVal;
5697   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5698     SwapSelectOps = false;
5699     NonConstantVal = FalseVal;
5700   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5701     SwapSelectOps = true;
5702     NonConstantVal = TrueVal;
5703   } else
5704     return SDValue();
5705 
  // Slct is now known to be the desired identity constant when CC is true.
5707   TrueVal = OtherOp;
5708   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5709   // Unless SwapSelectOps says CC should be false.
5710   if (SwapSelectOps)
5711     std::swap(TrueVal, FalseVal);
5712 
5713   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5714                      {Slct.getOperand(0), Slct.getOperand(1),
5715                       Slct.getOperand(2), TrueVal, FalseVal});
5716 }
5717 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5719 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5720                                                 bool AllOnes) {
5721   SDValue N0 = N->getOperand(0);
5722   SDValue N1 = N->getOperand(1);
5723   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5724     return Result;
5725   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5726     return Result;
5727   return SDValue();
5728 }
5729 
5730 static SDValue performANDCombine(SDNode *N,
5731                                  TargetLowering::DAGCombinerInfo &DCI,
5732                                  const RISCVSubtarget &Subtarget) {
5733   SelectionDAG &DAG = DCI.DAG;
5734 
5735   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5736   //      (select lhs, rhs, cc, x, (and x, y))
5737   return combineSelectCCAndUseCommutative(N, DAG, true);
5738 }
5739 
5740 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5741                                 const RISCVSubtarget &Subtarget) {
5742   SelectionDAG &DAG = DCI.DAG;
5743   if (Subtarget.hasStdExtZbp()) {
5744     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5745       return GREV;
5746     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5747       return GORC;
5748     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5749       return SHFL;
5750   }
5751 
5752   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5753   //      (select lhs, rhs, cc, x, (or x, y))
5754   return combineSelectCCAndUseCommutative(N, DAG, false);
5755 }
5756 
5757 static SDValue performXORCombine(SDNode *N,
5758                                  TargetLowering::DAGCombinerInfo &DCI,
5759                                  const RISCVSubtarget &Subtarget) {
5760   SelectionDAG &DAG = DCI.DAG;
5761 
5762   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5763   //      (select lhs, rhs, cc, x, (xor x, y))
5764   return combineSelectCCAndUseCommutative(N, DAG, false);
5765 }
5766 
5767 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5768 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5769 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5770 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5771 // ADDW/SUBW/MULW.
5772 static SDValue performANY_EXTENDCombine(SDNode *N,
5773                                         TargetLowering::DAGCombinerInfo &DCI,
5774                                         const RISCVSubtarget &Subtarget) {
5775   if (!Subtarget.is64Bit())
5776     return SDValue();
5777 
5778   SelectionDAG &DAG = DCI.DAG;
5779 
5780   SDValue Src = N->getOperand(0);
5781   EVT VT = N->getValueType(0);
5782   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5783     return SDValue();
5784 
5785   // The opcode must be one that can implicitly sign_extend.
5786   // FIXME: Additional opcodes.
5787   switch (Src.getOpcode()) {
5788   default:
5789     return SDValue();
5790   case ISD::MUL:
5791     if (!Subtarget.hasStdExtM())
5792       return SDValue();
5793     LLVM_FALLTHROUGH;
5794   case ISD::ADD:
5795   case ISD::SUB:
5796     break;
5797   }
5798 
5799   SmallVector<SDNode *, 4> SetCCs;
5800   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
5801                             UE = Src.getNode()->use_end();
5802        UI != UE; ++UI) {
5803     SDNode *User = *UI;
5804     if (User == N)
5805       continue;
5806     if (UI.getUse().getResNo() != Src.getResNo())
5807       continue;
5808     // All i32 setccs are legalized by sign extending operands.
5809     if (User->getOpcode() == ISD::SETCC) {
5810       SetCCs.push_back(User);
5811       continue;
5812     }
5813     // We don't know if we can extend this user.
5814     break;
5815   }
5816 
5817   // If we don't have any SetCCs, this isn't worthwhile.
5818   if (SetCCs.empty())
5819     return SDValue();
5820 
5821   SDLoc DL(N);
5822   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
5823   DCI.CombineTo(N, SExt);
5824 
5825   // Promote all the setccs.
5826   for (SDNode *SetCC : SetCCs) {
5827     SmallVector<SDValue, 4> Ops;
5828 
5829     for (unsigned j = 0; j != 2; ++j) {
5830       SDValue SOp = SetCC->getOperand(j);
5831       if (SOp == Src)
5832         Ops.push_back(SExt);
5833       else
5834         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
5835     }
5836 
5837     Ops.push_back(SetCC->getOperand(2));
5838     DCI.CombineTo(SetCC,
5839                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5840   }
5841   return SDValue(N, 0);
5842 }
5843 
5844 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5845                                                DAGCombinerInfo &DCI) const {
5846   SelectionDAG &DAG = DCI.DAG;
5847 
5848   switch (N->getOpcode()) {
5849   default:
5850     break;
5851   case RISCVISD::SplitF64: {
5852     SDValue Op0 = N->getOperand(0);
5853     // If the input to SplitF64 is just BuildPairF64 then the operation is
5854     // redundant. Instead, use BuildPairF64's operands directly.
5855     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5856       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5857 
5858     SDLoc DL(N);
5859 
5860     // It's cheaper to materialise two 32-bit integers than to load a double
5861     // from the constant pool and transfer it to integer registers through the
5862     // stack.
5863     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5864       APInt V = C->getValueAPF().bitcastToAPInt();
5865       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5866       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5867       return DCI.CombineTo(N, Lo, Hi);
5868     }
5869 
5870     // This is a target-specific version of a DAGCombine performed in
5871     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5872     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5873     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5874     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5875         !Op0.getNode()->hasOneUse())
5876       break;
5877     SDValue NewSplitF64 =
5878         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5879                     Op0.getOperand(0));
5880     SDValue Lo = NewSplitF64.getValue(0);
5881     SDValue Hi = NewSplitF64.getValue(1);
5882     APInt SignBit = APInt::getSignMask(32);
5883     if (Op0.getOpcode() == ISD::FNEG) {
5884       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5885                                   DAG.getConstant(SignBit, DL, MVT::i32));
5886       return DCI.CombineTo(N, Lo, NewHi);
5887     }
5888     assert(Op0.getOpcode() == ISD::FABS);
5889     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5890                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5891     return DCI.CombineTo(N, Lo, NewHi);
5892   }
5893   case RISCVISD::SLLW:
5894   case RISCVISD::SRAW:
5895   case RISCVISD::SRLW:
5896   case RISCVISD::ROLW:
5897   case RISCVISD::RORW: {
5898     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5899     SDValue LHS = N->getOperand(0);
5900     SDValue RHS = N->getOperand(1);
5901     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5902     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5903     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5904         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5905       if (N->getOpcode() != ISD::DELETED_NODE)
5906         DCI.AddToWorklist(N);
5907       return SDValue(N, 0);
5908     }
5909     break;
5910   }
5911   case RISCVISD::CLZW:
5912   case RISCVISD::CTZW: {
5913     // Only the lower 32 bits of the first operand are read
5914     SDValue Op0 = N->getOperand(0);
5915     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5916     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5917       if (N->getOpcode() != ISD::DELETED_NODE)
5918         DCI.AddToWorklist(N);
5919       return SDValue(N, 0);
5920     }
5921     break;
5922   }
5923   case RISCVISD::FSL:
5924   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5926     SDValue ShAmt = N->getOperand(2);
5927     unsigned BitWidth = ShAmt.getValueSizeInBits();
5928     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5929     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
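    // For example, with a 64-bit shift amount the mask is 127, keeping 7 bits.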
5930     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5931       if (N->getOpcode() != ISD::DELETED_NODE)
5932         DCI.AddToWorklist(N);
5933       return SDValue(N, 0);
5934     }
5935     break;
5936   }
5937   case RISCVISD::FSLW:
5938   case RISCVISD::FSRW: {
5939     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5940     // read.
5941     SDValue Op0 = N->getOperand(0);
5942     SDValue Op1 = N->getOperand(1);
5943     SDValue ShAmt = N->getOperand(2);
5944     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5945     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5946     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5947         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5948         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5949       if (N->getOpcode() != ISD::DELETED_NODE)
5950         DCI.AddToWorklist(N);
5951       return SDValue(N, 0);
5952     }
5953     break;
5954   }
5955   case RISCVISD::GREV:
5956   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5958     SDValue ShAmt = N->getOperand(1);
5959     unsigned BitWidth = ShAmt.getValueSizeInBits();
5960     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5961     APInt ShAmtMask(BitWidth, BitWidth - 1);
5962     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5963       if (N->getOpcode() != ISD::DELETED_NODE)
5964         DCI.AddToWorklist(N);
5965       return SDValue(N, 0);
5966     }
5967 
5968     return combineGREVI_GORCI(N, DCI.DAG);
5969   }
5970   case RISCVISD::GREVW:
5971   case RISCVISD::GORCW: {
5972     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5973     SDValue LHS = N->getOperand(0);
5974     SDValue RHS = N->getOperand(1);
5975     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5976     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5977     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5978         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5979       if (N->getOpcode() != ISD::DELETED_NODE)
5980         DCI.AddToWorklist(N);
5981       return SDValue(N, 0);
5982     }
5983 
5984     return combineGREVI_GORCI(N, DCI.DAG);
5985   }
5986   case RISCVISD::SHFL:
5987   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5989     SDValue ShAmt = N->getOperand(1);
5990     unsigned BitWidth = ShAmt.getValueSizeInBits();
5991     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5992     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5993     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5994       if (N->getOpcode() != ISD::DELETED_NODE)
5995         DCI.AddToWorklist(N);
5996       return SDValue(N, 0);
5997     }
5998 
5999     break;
6000   }
6001   case RISCVISD::SHFLW:
6002   case RISCVISD::UNSHFLW: {
6003     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6004     SDValue LHS = N->getOperand(0);
6005     SDValue RHS = N->getOperand(1);
6006     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6007     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
6008     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
6009         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
6010       if (N->getOpcode() != ISD::DELETED_NODE)
6011         DCI.AddToWorklist(N);
6012       return SDValue(N, 0);
6013     }
6014 
6015     break;
6016   }
6017   case RISCVISD::BCOMPRESSW:
6018   case RISCVISD::BDECOMPRESSW: {
6019     // Only the lower 32 bits of LHS and RHS are read.
6020     SDValue LHS = N->getOperand(0);
6021     SDValue RHS = N->getOperand(1);
6022     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6023     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
6024         SimplifyDemandedBits(RHS, Mask, DCI)) {
6025       if (N->getOpcode() != ISD::DELETED_NODE)
6026         DCI.AddToWorklist(N);
6027       return SDValue(N, 0);
6028     }
6029 
6030     break;
6031   }
6032   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6033     SDLoc DL(N);
6034     SDValue Op0 = N->getOperand(0);
6035     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6036     // conversion is unnecessary and can be replaced with an ANY_EXTEND
6037     // of the FMV_W_X_RV64 operand.
6038     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
6039       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
6040              "Unexpected value type!");
6041       return Op0.getOperand(0);
6042     }
6043 
6044     // This is a target-specific version of a DAGCombine performed in
6045     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6046     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6047     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6048     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6049         !Op0.getNode()->hasOneUse())
6050       break;
6051     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
6052                                  Op0.getOperand(0));
6053     APInt SignBit = APInt::getSignMask(32).sext(64);
6054     if (Op0.getOpcode() == ISD::FNEG)
6055       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
6056                          DAG.getConstant(SignBit, DL, MVT::i64));
6057 
6058     assert(Op0.getOpcode() == ISD::FABS);
6059     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
6060                        DAG.getConstant(~SignBit, DL, MVT::i64));
6061   }
6062   case ISD::AND:
6063     return performANDCombine(N, DCI, Subtarget);
6064   case ISD::OR:
6065     return performORCombine(N, DCI, Subtarget);
6066   case ISD::XOR:
6067     return performXORCombine(N, DCI, Subtarget);
6068   case ISD::ANY_EXTEND:
6069     return performANY_EXTENDCombine(N, DCI, Subtarget);
6070   case ISD::ZERO_EXTEND:
6071     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
6072     // type legalization. This is safe because fp_to_uint produces poison if
6073     // it overflows.
6074     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
6075         N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
6076         isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
6077       return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
6078                          N->getOperand(0).getOperand(0));
6079     return SDValue();
6080   case RISCVISD::SELECT_CC: {
    // Transform select_cc nodes with an integer equality condition (eq/ne).
6082     SDValue LHS = N->getOperand(0);
6083     SDValue RHS = N->getOperand(1);
6084     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
6085     if (!ISD::isIntEqualitySetCC(CCVal))
6086       break;
6087 
6088     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6089     //      (select_cc X, Y, lt, trueV, falseV)
6090     // Sometimes the setcc is introduced after select_cc has been formed.
6091     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6092         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6093       // If we're looking for eq 0 instead of ne 0, we need to invert the
6094       // condition.
6095       bool Invert = CCVal == ISD::SETEQ;
6096       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6097       if (Invert)
6098         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6099 
6100       SDLoc DL(N);
6101       RHS = LHS.getOperand(1);
6102       LHS = LHS.getOperand(0);
6103       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6104 
6105       SDValue TargetCC =
6106           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
6107       return DAG.getNode(
6108           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6109           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6110     }
6111 
6112     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6113     //      (select_cc X, Y, eq/ne, trueV, falseV)
6114     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6115       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6116                          {LHS.getOperand(0), LHS.getOperand(1),
6117                           N->getOperand(2), N->getOperand(3),
6118                           N->getOperand(4)});
6119     // (select_cc X, 1, setne, trueV, falseV) ->
6120     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6121     // This can occur when legalizing some floating point comparisons.
6122     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6123     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6124       SDLoc DL(N);
6125       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6126       SDValue TargetCC =
6127           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
6128       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6129       return DAG.getNode(
6130           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6131           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6132     }
6133 
6134     break;
6135   }
6136   case RISCVISD::BR_CC: {
6137     SDValue LHS = N->getOperand(1);
6138     SDValue RHS = N->getOperand(2);
6139     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6140     if (!ISD::isIntEqualitySetCC(CCVal))
6141       break;
6142 
6143     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6144     //      (br_cc X, Y, lt, dest)
6145     // Sometimes the setcc is introduced after br_cc has been formed.
6146     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6147         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6148       // If we're looking for eq 0 instead of ne 0, we need to invert the
6149       // condition.
6150       bool Invert = CCVal == ISD::SETEQ;
6151       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6152       if (Invert)
6153         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6154 
6155       SDLoc DL(N);
6156       RHS = LHS.getOperand(1);
6157       LHS = LHS.getOperand(0);
6158       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6159 
6160       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6161                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6162                          N->getOperand(4));
6163     }
6164 
6165     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
6167     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6168       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6169                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6170                          N->getOperand(3), N->getOperand(4));
6171 
6172     // (br_cc X, 1, setne, br_cc) ->
6173     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
6174     // This can occur when legalizing some floating point comparisons.
6175     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6176     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6177       SDLoc DL(N);
6178       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6179       SDValue TargetCC = DAG.getCondCode(CCVal);
6180       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6181       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6182                          N->getOperand(0), LHS, RHS, TargetCC,
6183                          N->getOperand(4));
6184     }
6185     break;
6186   }
6187   case ISD::FCOPYSIGN: {
6188     EVT VT = N->getValueType(0);
6189     if (!VT.isVector())
6190       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // its TRUNC flag set to 1.
6195     SDValue In2 = N->getOperand(1);
6196     // Avoid cases where the extend/round has multiple uses, as duplicating
6197     // those is typically more expensive than removing a fneg.
6198     if (!In2.hasOneUse())
6199       break;
6200     if (In2.getOpcode() != ISD::FP_EXTEND &&
6201         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6202       break;
6203     In2 = In2.getOperand(0);
6204     if (In2.getOpcode() != ISD::FNEG)
6205       break;
6206     SDLoc DL(N);
6207     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6208     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6209                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6210   }
6211   case ISD::MGATHER:
6212   case ISD::MSCATTER: {
6213     if (!DCI.isBeforeLegalize())
6214       break;
6215     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
6216     SDValue Index = MGSN->getIndex();
6217     EVT IndexVT = Index.getValueType();
6218     MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
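    // That is, the index vector is interpreted as unsigned byte offsets;
    // signed or scaled index vectors are rewritten below to match.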
6221     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
6222                                 (MGSN->isIndexSigned() &&
6223                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
6224     if (!NeedsIdxLegalization)
6225       break;
6226 
6227     SDLoc DL(N);
6228 
6229     // Any index legalization should first promote to XLenVT, so we don't lose
6230     // bits when scaling. This may create an illegal index type so we let
6231     // LLVM's legalization take care of the splitting.
6232     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6233       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6234       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
6235                                                 : ISD::ZERO_EXTEND,
6236                           DL, IndexVT, Index);
6237     }
6238 
6239     unsigned Scale = N->getConstantOperandVal(5);
6240     if (MGSN->isIndexScaled() && Scale != 1) {
6241       // Manually scale the indices by the element size.
6242       // TODO: Sanitize the scale operand here?
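      // For example, for a gather/scatter of i32 elements the scale is
      // typically 4, so the indices are shifted left by Log2_32(4) == 2 below.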
6243       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
6244       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6245       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6246     }
6247 
6248     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6249     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6250       return DAG.getMaskedGather(
6251           N->getVTList(), MGSN->getMemoryVT(), DL,
6252           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6253            MGSN->getBasePtr(), Index, MGN->getScale()},
6254           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6255     }
6256     const auto *MSN = cast<MaskedScatterSDNode>(N);
6257     return DAG.getMaskedScatter(
6258         N->getVTList(), MGSN->getMemoryVT(), DL,
6259         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6260          Index, MGSN->getScale()},
6261         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6262   }
6263   case RISCVISD::SRA_VL:
6264   case RISCVISD::SRL_VL:
6265   case RISCVISD::SHL_VL: {
6266     SDValue ShAmt = N->getOperand(1);
6267     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6268       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6269       SDLoc DL(N);
6270       SDValue VL = N->getOperand(3);
6271       EVT VT = N->getValueType(0);
6272       ShAmt =
6273           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6274       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6275                          N->getOperand(2), N->getOperand(3));
6276     }
6277     break;
6278   }
6279   case ISD::SRA:
6280   case ISD::SRL:
6281   case ISD::SHL: {
6282     SDValue ShAmt = N->getOperand(1);
6283     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6284       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6285       SDLoc DL(N);
6286       EVT VT = N->getValueType(0);
6287       ShAmt =
6288           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6289       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6290     }
6291     break;
6292   }
6293   case RISCVISD::MUL_VL: {
6294     // Try to form VWMUL or VWMULU.
6295     // FIXME: Look for splat of extended scalar as well.
6296     // FIXME: Support VWMULSU.
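    // For example: (mul_vl (vsext_vl X), (vsext_vl Y)) with half-width sources
    // X and Y becomes (vwmul_vl X, Y), matching the widening multiply.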
6297     SDValue Op0 = N->getOperand(0);
6298     SDValue Op1 = N->getOperand(1);
6299     bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6300     bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6301     if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
6302       return SDValue();
6303 
6304     // Make sure the extends have a single use.
6305     if (!Op0.hasOneUse() || !Op1.hasOneUse())
6306       return SDValue();
6307 
6308     SDValue Mask = N->getOperand(2);
6309     SDValue VL = N->getOperand(3);
6310     if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
6311         Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
6312       return SDValue();
6313 
6314     Op0 = Op0.getOperand(0);
6315     Op1 = Op1.getOperand(0);
6316 
6317     MVT VT = N->getSimpleValueType(0);
6318     MVT NarrowVT =
6319         MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
6320                          VT.getVectorElementCount());
6321 
6322     SDLoc DL(N);
6323 
6324     // Re-introduce narrower extends if needed.
6325     unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6326     if (Op0.getValueType() != NarrowVT)
6327       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6328     if (Op1.getValueType() != NarrowVT)
6329       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6330 
6331     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6332     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6333   }
6334   }
6335 
6336   return SDValue();
6337 }
6338 
6339 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6340     const SDNode *N, CombineLevel Level) const {
6341   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6342   // materialised in fewer instructions than `(OP _, c1)`:
6343   //
6344   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6345   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
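  //
  // For example (illustrative): with c1 == 4 and c2 == 3, c1 << c2 == 32 still
  // fits in an ADDI immediate, so the fold is desirable; with c1 == 2047 and
  // c2 == 4, c1 << c2 == 32752 no longer fits but c1 does, so the fold is
  // rejected to keep the cheaper constant.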
6346   SDValue N0 = N->getOperand(0);
6347   EVT Ty = N0.getValueType();
6348   if (Ty.isScalarInteger() &&
6349       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6350     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6351     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6352     if (C1 && C2) {
6353       const APInt &C1Int = C1->getAPIntValue();
6354       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6355 
6356       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6357       // and the combine should happen, to potentially allow further combines
6358       // later.
6359       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6360           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6361         return true;
6362 
6363       // We can materialise `c1` in an add immediate, so it's "free", and the
6364       // combine should be prevented.
6365       if (C1Int.getMinSignedBits() <= 64 &&
6366           isLegalAddImmediate(C1Int.getSExtValue()))
6367         return false;
6368 
6369       // Neither constant will fit into an immediate, so find materialisation
6370       // costs.
6371       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6372                                               Subtarget.getFeatureBits(),
6373                                               /*CompressionCost*/true);
6374       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6375           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
6376           /*CompressionCost*/true);
6377 
6378       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6379       // combine should be prevented.
6380       if (C1Cost < ShiftedC1Cost)
6381         return false;
6382     }
6383   }
6384   return true;
6385 }
6386 
6387 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6388     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6389     TargetLoweringOpt &TLO) const {
6390   // Delay this optimization as late as possible.
6391   if (!TLO.LegalOps)
6392     return false;
6393 
6394   EVT VT = Op.getValueType();
6395   if (VT.isVector())
6396     return false;
6397 
6398   // Only handle AND for now.
6399   if (Op.getOpcode() != ISD::AND)
6400     return false;
6401 
6402   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6403   if (!C)
6404     return false;
6405 
6406   const APInt &Mask = C->getAPIntValue();
6407 
6408   // Clear all non-demanded bits initially.
6409   APInt ShrunkMask = Mask & DemandedBits;
6410 
6411   // Try to make a smaller immediate by setting undemanded bits.
6412 
6413   APInt ExpandedMask = Mask | ~DemandedBits;
6414 
6415   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6416     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6417   };
6418   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6419     if (NewMask == Mask)
6420       return true;
6421     SDLoc DL(Op);
6422     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6423     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6424     return TLO.CombineTo(Op, NewOp);
6425   };
6426 
6427   // If the shrunk mask fits in sign extended 12 bits, let the target
6428   // independent code apply it.
6429   if (ShrunkMask.isSignedIntN(12))
6430     return false;
6431 
6432   // Preserve (and X, 0xffff) when zext.h is supported.
6433   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6434     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6435     if (IsLegalMask(NewMask))
6436       return UseMask(NewMask);
6437   }
6438 
6439   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6440   if (VT == MVT::i64) {
6441     APInt NewMask = APInt(64, 0xffffffff);
6442     if (IsLegalMask(NewMask))
6443       return UseMask(NewMask);
6444   }
6445 
6446   // For the remaining optimizations, we need to be able to make a negative
6447   // number through a combination of mask and undemanded bits.
6448   if (!ExpandedMask.isNegative())
6449     return false;
6450 
  // Compute the fewest number of bits needed to represent the negative number.
6452   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6453 
6454   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6455   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
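  //
  // For example (illustrative): on RV64, (and X, 0xfffff800) with only bits
  // 0-11 demanded. The 64-bit constant 0xfffff800 is expensive to materialise,
  // but ShrunkMask is 0x800 and every bit from 11 upwards is either in the
  // mask or not demanded, so the mask can be widened to -2048 (0xffff...f800),
  // which fits in an ANDI immediate.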
6456   APInt NewMask = ShrunkMask;
6457   if (MinSignedBits <= 12)
6458     NewMask.setBitsFrom(11);
6459   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6460     NewMask.setBitsFrom(31);
6461   else
6462     return false;
6463 
6464   // Sanity check that our new mask is a subset of the demanded mask.
6465   assert(IsLegalMask(NewMask));
6466   return UseMask(NewMask);
6467 }
6468 
6469 static void computeGREV(APInt &Src, unsigned ShAmt) {
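  // Apply the GREV (generalised bit-reverse) permutation described by ShAmt to
  // the bits of Src. For example, ShAmt == 7 reverses the bits within each
  // byte and ShAmt == 56 reverses the byte order of a 64-bit value.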
6470   ShAmt &= Src.getBitWidth() - 1;
6471   uint64_t x = Src.getZExtValue();
6472   if (ShAmt & 1)
6473     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6474   if (ShAmt & 2)
6475     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6476   if (ShAmt & 4)
6477     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6478   if (ShAmt & 8)
6479     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6480   if (ShAmt & 16)
6481     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6482   if (ShAmt & 32)
6483     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6484   Src = x;
6485 }
6486 
6487 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6488                                                         KnownBits &Known,
6489                                                         const APInt &DemandedElts,
6490                                                         const SelectionDAG &DAG,
6491                                                         unsigned Depth) const {
6492   unsigned BitWidth = Known.getBitWidth();
6493   unsigned Opc = Op.getOpcode();
6494   assert((Opc >= ISD::BUILTIN_OP_END ||
6495           Opc == ISD::INTRINSIC_WO_CHAIN ||
6496           Opc == ISD::INTRINSIC_W_CHAIN ||
6497           Opc == ISD::INTRINSIC_VOID) &&
6498          "Should use MaskedValueIsZero if you don't know whether Op"
6499          " is a target node!");
6500 
6501   Known.resetAll();
6502   switch (Opc) {
6503   default: break;
6504   case RISCVISD::SELECT_CC: {
6505     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6506     // If we don't know any bits, early out.
6507     if (Known.isUnknown())
6508       break;
6509     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6510 
6511     // Only known if known in both the LHS and RHS.
6512     Known = KnownBits::commonBits(Known, Known2);
6513     break;
6514   }
6515   case RISCVISD::REMUW: {
6516     KnownBits Known2;
6517     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6518     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6519     // We only care about the lower 32 bits.
6520     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6521     // Restore the original width by sign extending.
6522     Known = Known.sext(BitWidth);
6523     break;
6524   }
6525   case RISCVISD::DIVUW: {
6526     KnownBits Known2;
6527     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6528     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6529     // We only care about the lower 32 bits.
6530     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6531     // Restore the original width by sign extending.
6532     Known = Known.sext(BitWidth);
6533     break;
6534   }
6535   case RISCVISD::CTZW: {
6536     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6537     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6538     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6539     Known.Zero.setBitsFrom(LowBits);
6540     break;
6541   }
6542   case RISCVISD::CLZW: {
6543     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6544     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6545     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6546     Known.Zero.setBitsFrom(LowBits);
6547     break;
6548   }
6549   case RISCVISD::GREV:
6550   case RISCVISD::GREVW: {
6551     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6552       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6553       if (Opc == RISCVISD::GREVW)
6554         Known = Known.trunc(32);
6555       unsigned ShAmt = C->getZExtValue();
6556       computeGREV(Known.Zero, ShAmt);
6557       computeGREV(Known.One, ShAmt);
6558       if (Opc == RISCVISD::GREVW)
6559         Known = Known.sext(BitWidth);
6560     }
6561     break;
6562   }
6563   case RISCVISD::READ_VLENB:
6564     // We assume VLENB is at least 16 bytes.
6565     Known.Zero.setLowBits(4);
6566     // We assume VLENB is no more than 65536 / 8 bytes.
6567     Known.Zero.setBitsFrom(14);
6568     break;
6569   case ISD::INTRINSIC_W_CHAIN: {
6570     unsigned IntNo = Op.getConstantOperandVal(1);
6571     switch (IntNo) {
6572     default:
6573       // We can't do anything for most intrinsics.
6574       break;
6575     case Intrinsic::riscv_vsetvli:
6576     case Intrinsic::riscv_vsetvlimax:
6577       // Assume that VL output is positive and would fit in an int32_t.
6578       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6579       if (BitWidth >= 32)
6580         Known.Zero.setBitsFrom(31);
6581       break;
6582     }
6583     break;
6584   }
6585   }
6586 }
6587 
6588 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6589     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6590     unsigned Depth) const {
6591   switch (Op.getOpcode()) {
6592   default:
6593     break;
6594   case RISCVISD::SLLW:
6595   case RISCVISD::SRAW:
6596   case RISCVISD::SRLW:
6597   case RISCVISD::DIVW:
6598   case RISCVISD::DIVUW:
6599   case RISCVISD::REMUW:
6600   case RISCVISD::ROLW:
6601   case RISCVISD::RORW:
6602   case RISCVISD::GREVW:
6603   case RISCVISD::GORCW:
6604   case RISCVISD::FSLW:
6605   case RISCVISD::FSRW:
6606   case RISCVISD::SHFLW:
6607   case RISCVISD::UNSHFLW:
6608   case RISCVISD::BCOMPRESSW:
6609   case RISCVISD::BDECOMPRESSW:
6610   case RISCVISD::FCVT_W_RV64:
6611   case RISCVISD::FCVT_WU_RV64:
6612     // TODO: As the result is sign-extended, this is conservatively correct. A
6613     // more precise answer could be calculated for SRAW depending on known
6614     // bits in the shift amount.
6615     return 33;
6616   case RISCVISD::SHFL:
6617   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
6619     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6620     // will stay within the upper 32 bits. If there were more than 32 sign bits
6621     // before there will be at least 33 sign bits after.
6622     if (Op.getValueType() == MVT::i64 &&
6623         isa<ConstantSDNode>(Op.getOperand(1)) &&
6624         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6625       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6626       if (Tmp > 32)
6627         return 33;
6628     }
6629     break;
6630   }
6631   case RISCVISD::VMV_X_S:
6632     // The number of sign bits of the scalar result is computed by obtaining the
6633     // element type of the input vector operand, subtracting its width from the
6634     // XLEN, and then adding one (sign bit within the element type). If the
6635     // element type is wider than XLen, the least-significant XLEN bits are
6636     // taken.
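    // For example, reading element 0 of a vector of i8 elements on RV64 gives
    // at least 64 - 8 + 1 == 57 sign bits.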
6637     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6638       return 1;
6639     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6640   }
6641 
6642   return 1;
6643 }
6644 
6645 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6646                                                   MachineBasicBlock *BB) {
6647   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6648 
6649   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6650   // Should the count have wrapped while it was being read, we need to try
6651   // again.
6652   // ...
6653   // read:
6654   // rdcycleh x3 # load high word of cycle
6655   // rdcycle  x2 # load low word of cycle
6656   // rdcycleh x4 # load high word of cycle
6657   // bne x3, x4, read # check if high word reads match, otherwise try again
6658   // ...
6659 
6660   MachineFunction &MF = *BB->getParent();
6661   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6662   MachineFunction::iterator It = ++BB->getIterator();
6663 
6664   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6665   MF.insert(It, LoopMBB);
6666 
6667   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6668   MF.insert(It, DoneMBB);
6669 
6670   // Transfer the remainder of BB and its successor edges to DoneMBB.
6671   DoneMBB->splice(DoneMBB->begin(), BB,
6672                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6673   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6674 
6675   BB->addSuccessor(LoopMBB);
6676 
6677   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6678   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6679   Register LoReg = MI.getOperand(0).getReg();
6680   Register HiReg = MI.getOperand(1).getReg();
6681   DebugLoc DL = MI.getDebugLoc();
6682 
6683   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6684   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6685       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6686       .addReg(RISCV::X0);
6687   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6688       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6689       .addReg(RISCV::X0);
6690   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6691       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6692       .addReg(RISCV::X0);
6693 
6694   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6695       .addReg(HiReg)
6696       .addReg(ReadAgainReg)
6697       .addMBB(LoopMBB);
6698 
6699   LoopMBB->addSuccessor(LoopMBB);
6700   LoopMBB->addSuccessor(DoneMBB);
6701 
6702   MI.eraseFromParent();
6703 
6704   return DoneMBB;
6705 }
6706 
6707 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6708                                              MachineBasicBlock *BB) {
6709   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
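  // The pseudo is expanded by spilling the FPR64 source to the stack slot
  // reserved for f64 moves and reloading the low and high 32-bit halves into
  // the two GPR destinations with two LW instructions.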
6710 
6711   MachineFunction &MF = *BB->getParent();
6712   DebugLoc DL = MI.getDebugLoc();
6713   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6714   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6715   Register LoReg = MI.getOperand(0).getReg();
6716   Register HiReg = MI.getOperand(1).getReg();
6717   Register SrcReg = MI.getOperand(2).getReg();
6718   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6719   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6720 
6721   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6722                           RI);
6723   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6724   MachineMemOperand *MMOLo =
6725       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6726   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6727       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6728   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6729       .addFrameIndex(FI)
6730       .addImm(0)
6731       .addMemOperand(MMOLo);
6732   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6733       .addFrameIndex(FI)
6734       .addImm(4)
6735       .addMemOperand(MMOHi);
6736   MI.eraseFromParent(); // The pseudo instruction is gone now.
6737   return BB;
6738 }
6739 
6740 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6741                                                  MachineBasicBlock *BB) {
6742   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6743          "Unexpected instruction");
6744 
6745   MachineFunction &MF = *BB->getParent();
6746   DebugLoc DL = MI.getDebugLoc();
6747   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6748   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6749   Register DstReg = MI.getOperand(0).getReg();
6750   Register LoReg = MI.getOperand(1).getReg();
6751   Register HiReg = MI.getOperand(2).getReg();
6752   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6753   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6754 
6755   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6756   MachineMemOperand *MMOLo =
6757       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6758   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6759       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6760   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6761       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6762       .addFrameIndex(FI)
6763       .addImm(0)
6764       .addMemOperand(MMOLo);
6765   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6766       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6767       .addFrameIndex(FI)
6768       .addImm(4)
6769       .addMemOperand(MMOHi);
6770   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6771   MI.eraseFromParent(); // The pseudo instruction is gone now.
6772   return BB;
6773 }
6774 
6775 static bool isSelectPseudo(MachineInstr &MI) {
6776   switch (MI.getOpcode()) {
6777   default:
6778     return false;
6779   case RISCV::Select_GPR_Using_CC_GPR:
6780   case RISCV::Select_FPR16_Using_CC_GPR:
6781   case RISCV::Select_FPR32_Using_CC_GPR:
6782   case RISCV::Select_FPR64_Using_CC_GPR:
6783     return true;
6784   }
6785 }
6786 
6787 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6788                                            MachineBasicBlock *BB) {
6789   // To "insert" Select_* instructions, we actually have to insert the triangle
6790   // control-flow pattern.  The incoming instructions know the destination vreg
6791   // to set, the condition code register to branch on, the true/false values to
6792   // select between, and the condcode to use to select the appropriate branch.
6793   //
6794   // We produce the following control flow:
6795   //     HeadMBB
6796   //     |  \
6797   //     |  IfFalseMBB
6798   //     | /
6799   //    TailMBB
6800   //
6801   // When we find a sequence of selects we attempt to optimize their emission
6802   // by sharing the control flow. Currently we only handle cases where we have
6803   // multiple selects with the exact same condition (same LHS, RHS and CC).
6804   // The selects may be interleaved with other instructions if the other
6805   // instructions meet some requirements we deem safe:
6806   // - They are debug instructions. Otherwise,
6807   // - They do not have side-effects, do not access memory and their inputs do
6808   //   not depend on the results of the select pseudo-instructions.
6809   // The TrueV/FalseV operands of the selects cannot depend on the result of
6810   // previous selects in the sequence.
6811   // These conditions could be further relaxed. See the X86 target for a
6812   // related approach and more information.
6813   Register LHS = MI.getOperand(1).getReg();
6814   Register RHS = MI.getOperand(2).getReg();
6815   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6816 
6817   SmallVector<MachineInstr *, 4> SelectDebugValues;
6818   SmallSet<Register, 4> SelectDests;
6819   SelectDests.insert(MI.getOperand(0).getReg());
6820 
6821   MachineInstr *LastSelectPseudo = &MI;
6822 
6823   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6824        SequenceMBBI != E; ++SequenceMBBI) {
6825     if (SequenceMBBI->isDebugInstr())
6826       continue;
6827     else if (isSelectPseudo(*SequenceMBBI)) {
6828       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6829           SequenceMBBI->getOperand(2).getReg() != RHS ||
6830           SequenceMBBI->getOperand(3).getImm() != CC ||
6831           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6832           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6833         break;
6834       LastSelectPseudo = &*SequenceMBBI;
6835       SequenceMBBI->collectDebugValues(SelectDebugValues);
6836       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6837     } else {
6838       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6839           SequenceMBBI->mayLoadOrStore())
6840         break;
6841       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6842             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6843           }))
6844         break;
6845     }
6846   }
6847 
6848   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6849   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6850   DebugLoc DL = MI.getDebugLoc();
6851   MachineFunction::iterator I = ++BB->getIterator();
6852 
6853   MachineBasicBlock *HeadMBB = BB;
6854   MachineFunction *F = BB->getParent();
6855   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6856   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6857 
6858   F->insert(I, IfFalseMBB);
6859   F->insert(I, TailMBB);
6860 
6861   // Transfer debug instructions associated with the selects to TailMBB.
6862   for (MachineInstr *DebugInstr : SelectDebugValues) {
6863     TailMBB->push_back(DebugInstr->removeFromParent());
6864   }
6865 
6866   // Move all instructions after the sequence to TailMBB.
6867   TailMBB->splice(TailMBB->end(), HeadMBB,
6868                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6869   // Update machine-CFG edges by transferring all successors of the current
6870   // block to the new block which will contain the Phi nodes for the selects.
6871   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6872   // Set the successors for HeadMBB.
6873   HeadMBB->addSuccessor(IfFalseMBB);
6874   HeadMBB->addSuccessor(TailMBB);
6875 
6876   // Insert appropriate branch.
6877   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6878 
6879   BuildMI(HeadMBB, DL, TII.get(Opcode))
6880     .addReg(LHS)
6881     .addReg(RHS)
6882     .addMBB(TailMBB);
6883 
6884   // IfFalseMBB just falls through to TailMBB.
6885   IfFalseMBB->addSuccessor(TailMBB);
6886 
6887   // Create PHIs for all of the select pseudo-instructions.
6888   auto SelectMBBI = MI.getIterator();
6889   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6890   auto InsertionPoint = TailMBB->begin();
6891   while (SelectMBBI != SelectEnd) {
6892     auto Next = std::next(SelectMBBI);
6893     if (isSelectPseudo(*SelectMBBI)) {
6894       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6895       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6896               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6897           .addReg(SelectMBBI->getOperand(4).getReg())
6898           .addMBB(HeadMBB)
6899           .addReg(SelectMBBI->getOperand(5).getReg())
6900           .addMBB(IfFalseMBB);
6901       SelectMBBI->eraseFromParent();
6902     }
6903     SelectMBBI = Next;
6904   }
6905 
6906   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6907   return TailMBB;
6908 }
6909 
6910 MachineBasicBlock *
6911 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6912                                                  MachineBasicBlock *BB) const {
6913   switch (MI.getOpcode()) {
6914   default:
6915     llvm_unreachable("Unexpected instr type to insert");
6916   case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
6919     return emitReadCycleWidePseudo(MI, BB);
6920   case RISCV::Select_GPR_Using_CC_GPR:
6921   case RISCV::Select_FPR16_Using_CC_GPR:
6922   case RISCV::Select_FPR32_Using_CC_GPR:
6923   case RISCV::Select_FPR64_Using_CC_GPR:
6924     return emitSelectPseudo(MI, BB);
6925   case RISCV::BuildPairF64Pseudo:
6926     return emitBuildPairF64Pseudo(MI, BB);
6927   case RISCV::SplitF64Pseudo:
6928     return emitSplitF64Pseudo(MI, BB);
6929   }
6930 }
6931 
6932 // Calling Convention Implementation.
6933 // The expectations for frontend ABI lowering vary from target to target.
6934 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6935 // details, but this is a longer term goal. For now, we simply try to keep the
6936 // role of the frontend as simple and well-defined as possible. The rules can
6937 // be summarised as:
6938 // * Never split up large scalar arguments. We handle them here.
6939 // * If a hardfloat calling convention is being used, and the struct may be
6940 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6941 // available, then pass as two separate arguments. If either the GPRs or FPRs
6942 // are exhausted, then pass according to the rule below.
6943 // * If a struct could never be passed in registers or directly in a stack
6944 // slot (as it is larger than 2*XLEN and the floating point rules don't
6945 // apply), then pass it using a pointer with the byval attribute.
6946 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6947 // word-sized array or a 2*XLEN scalar (depending on alignment).
6948 // * The frontend can determine whether a struct is returned by reference or
6949 // not based on its size and fields. If it will be returned by reference, the
6950 // frontend must modify the prototype so a pointer with the sret annotation is
6951 // passed as the first argument. This is not necessary for large scalar
6952 // returns.
6953 // * Struct return values and varargs should be coerced to structs containing
6954 // register-size fields in the same situations they would be for fixed
6955 // arguments.
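//
// For example (illustrative): on RV32, an 8-byte struct of two ints may be
// coerced to [2 x i32] (or to a single i64 if it requires 8-byte alignment),
// while a 24-byte struct is passed byval via a pointer.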
6956 
6957 static const MCPhysReg ArgGPRs[] = {
6958   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6959   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6960 };
6961 static const MCPhysReg ArgFPR16s[] = {
6962   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6963   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6964 };
6965 static const MCPhysReg ArgFPR32s[] = {
6966   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6967   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6968 };
6969 static const MCPhysReg ArgFPR64s[] = {
6970   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6971   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6972 };
6973 // This is an interim calling convention and it may be changed in the future.
6974 static const MCPhysReg ArgVRs[] = {
6975     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6976     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6977     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6978 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6979                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6980                                      RISCV::V20M2, RISCV::V22M2};
6981 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6982                                      RISCV::V20M4};
6983 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6984 
6985 // Pass a 2*XLEN argument that has been split into two XLEN values through
6986 // registers or the stack as necessary.
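// For example (illustrative): an i64 argument on RV32 is split into two i32
// halves; if only one argument GPR remains free, the low half is passed in
// that register and the high half is passed on the stack.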
6987 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6988                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6989                                 MVT ValVT2, MVT LocVT2,
6990                                 ISD::ArgFlagsTy ArgFlags2) {
6991   unsigned XLenInBytes = XLen / 8;
6992   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6993     // At least one half can be passed via register.
6994     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6995                                      VA1.getLocVT(), CCValAssign::Full));
6996   } else {
6997     // Both halves must be passed on the stack, with proper alignment.
6998     Align StackAlign =
6999         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7000     State.addLoc(
7001         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
7002                             State.AllocateStack(XLenInBytes, StackAlign),
7003                             VA1.getLocVT(), CCValAssign::Full));
7004     State.addLoc(CCValAssign::getMem(
7005         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7006         LocVT2, CCValAssign::Full));
7007     return false;
7008   }
7009 
7010   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7011     // The second half can also be passed via register.
7012     State.addLoc(
7013         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
7014   } else {
7015     // The second half is passed via the stack, without additional alignment.
7016     State.addLoc(CCValAssign::getMem(
7017         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7018         LocVT2, CCValAssign::Full));
7019   }
7020 
7021   return false;
7022 }
7023 
7024 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7025                                Optional<unsigned> FirstMaskArgument,
7026                                CCState &State, const RISCVTargetLowering &TLI) {
7027   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7028   if (RC == &RISCV::VRRegClass) {
7029     // Assign the first mask argument to V0.
7030     // This is an interim calling convention and it may be changed in the
7031     // future.
7032     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7033       return State.AllocateReg(RISCV::V0);
7034     return State.AllocateReg(ArgVRs);
7035   }
7036   if (RC == &RISCV::VRM2RegClass)
7037     return State.AllocateReg(ArgVRM2s);
7038   if (RC == &RISCV::VRM4RegClass)
7039     return State.AllocateReg(ArgVRM4s);
7040   if (RC == &RISCV::VRM8RegClass)
7041     return State.AllocateReg(ArgVRM8s);
7042   llvm_unreachable("Unhandled register class for ValueType");
7043 }
7044 
7045 // Implements the RISC-V calling convention. Returns true upon failure.
7046 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7047                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7048                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7049                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7050                      Optional<unsigned> FirstMaskArgument) {
7051   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7052   assert(XLen == 32 || XLen == 64);
7053   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7054 
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
7057   if (!LocVT.isVector() && IsRet && ValNo > 1)
7058     return true;
7059 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
7062   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
7065   bool UseGPRForF64 = true;
7066 
7067   switch (ABI) {
7068   default:
7069     llvm_unreachable("Unexpected ABI");
7070   case RISCVABI::ABI_ILP32:
7071   case RISCVABI::ABI_LP64:
7072     break;
7073   case RISCVABI::ABI_ILP32F:
7074   case RISCVABI::ABI_LP64F:
7075     UseGPRForF16_F32 = !IsFixed;
7076     break;
7077   case RISCVABI::ABI_ILP32D:
7078   case RISCVABI::ABI_LP64D:
7079     UseGPRForF16_F32 = !IsFixed;
7080     UseGPRForF64 = !IsFixed;
7081     break;
7082   }
7083 
7084   // FPR16, FPR32, and FPR64 alias each other.
7085   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7086     UseGPRForF16_F32 = true;
7087     UseGPRForF64 = true;
7088   }
7089 
7090   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7091   // similar local variables rather than directly checking against the target
7092   // ABI.
7093 
7094   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7095     LocVT = XLenVT;
7096     LocInfo = CCValAssign::BCvt;
7097   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7098     LocVT = MVT::i64;
7099     LocInfo = CCValAssign::BCvt;
7100   }
7101 
7102   // If this is a variadic argument, the RISC-V calling convention requires
7103   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7104   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7105   // be used regardless of whether the original argument was split during
7106   // legalisation or not. The argument will not be passed by registers if the
7107   // original type is larger than 2*XLEN, so the register alignment rule does
7108   // not apply.
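  // For example (illustrative): a variadic double passed on RV32 after a
  // single fixed int argument skips a1 and is passed in the aligned pair
  // a2+a3.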
7109   unsigned TwoXLenInBytes = (2 * XLen) / 8;
7110   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7111       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7112     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7113     // Skip 'odd' register if necessary.
7114     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7115       State.AllocateReg(ArgGPRs);
7116   }
7117 
7118   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7119   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7120       State.getPendingArgFlags();
7121 
7122   assert(PendingLocs.size() == PendingArgFlags.size() &&
7123          "PendingLocs and PendingArgFlags out of sync");
7124 
7125   // Handle passing f64 on RV32D with a soft float ABI or when floating point
7126   // registers are exhausted.
7127   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7128     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7129            "Can't lower f64 if it is split");
7130     // Depending on available argument GPRS, f64 may be passed in a pair of
7131     // GPRs, split between a GPR and the stack, or passed completely on the
7132     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7133     // cases.
7134     Register Reg = State.AllocateReg(ArgGPRs);
7135     LocVT = MVT::i32;
7136     if (!Reg) {
7137       unsigned StackOffset = State.AllocateStack(8, Align(8));
7138       State.addLoc(
7139           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7140       return false;
7141     }
7142     if (!State.AllocateReg(ArgGPRs))
7143       State.AllocateStack(4, Align(4));
7144     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7145     return false;
7146   }
7147 
7148   // Fixed-length vectors are located in the corresponding scalable-vector
7149   // container types.
7150   if (ValVT.isFixedLengthVector())
7151     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7152 
7153   // Split arguments might be passed indirectly, so keep track of the pending
7154   // values. Split vectors are passed via a mix of registers and indirectly, so
7155   // treat them as we would any other argument.
7156   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7157     LocVT = XLenVT;
7158     LocInfo = CCValAssign::Indirect;
7159     PendingLocs.push_back(
7160         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7161     PendingArgFlags.push_back(ArgFlags);
7162     if (!ArgFlags.isSplitEnd()) {
7163       return false;
7164     }
7165   }
7166 
7167   // If the split argument only had two elements, it should be passed directly
7168   // in registers or on the stack.
7169   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
7170       PendingLocs.size() <= 2) {
7171     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7172     // Apply the normal calling convention rules to the first half of the
7173     // split argument.
7174     CCValAssign VA = PendingLocs[0];
7175     ISD::ArgFlagsTy AF = PendingArgFlags[0];
7176     PendingLocs.clear();
7177     PendingArgFlags.clear();
7178     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7179                                ArgFlags);
7180   }
7181 
7182   // Allocate to a register if possible, or else a stack slot.
7183   Register Reg;
7184   unsigned StoreSizeBytes = XLen / 8;
7185   Align StackAlign = Align(XLen / 8);
7186 
7187   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
7188     Reg = State.AllocateReg(ArgFPR16s);
7189   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
7190     Reg = State.AllocateReg(ArgFPR32s);
7191   else if (ValVT == MVT::f64 && !UseGPRForF64)
7192     Reg = State.AllocateReg(ArgFPR64s);
7193   else if (ValVT.isVector()) {
7194     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
7195     if (!Reg) {
7196       // For return values, the vector must be passed fully via registers or
7197       // via the stack.
7198       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
7199       // but we're using all of them.
7200       if (IsRet)
7201         return true;
7202       // Try using a GPR to pass the address
7203       if ((Reg = State.AllocateReg(ArgGPRs))) {
7204         LocVT = XLenVT;
7205         LocInfo = CCValAssign::Indirect;
7206       } else if (ValVT.isScalableVector()) {
7207         report_fatal_error("Unable to pass scalable vector types on the stack");
7208       } else {
7209         // Pass fixed-length vectors on the stack.
7210         LocVT = ValVT;
7211         StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful with vXi1
        // vectors, whose sub-byte elements round up to an alignment of one.
7214         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7215       }
7216     }
7217   } else {
7218     Reg = State.AllocateReg(ArgGPRs);
7219   }
7220 
7221   unsigned StackOffset =
7222       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7223 
7224   // If we reach this point and PendingLocs is non-empty, we must be at the
7225   // end of a split argument that must be passed indirectly.
7226   if (!PendingLocs.empty()) {
7227     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7228     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7229 
7230     for (auto &It : PendingLocs) {
7231       if (Reg)
7232         It.convertToReg(Reg);
7233       else
7234         It.convertToMem(StackOffset);
7235       State.addLoc(It);
7236     }
7237     PendingLocs.clear();
7238     PendingArgFlags.clear();
7239     return false;
7240   }
7241 
7242   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7243           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7244          "Expected an XLenVT or vector types at this stage");
7245 
7246   if (Reg) {
7247     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7248     return false;
7249   }
7250 
7251   // When a floating-point value is passed on the stack, no bit-conversion is
7252   // needed.
7253   if (ValVT.isFloatingPoint()) {
7254     LocVT = ValVT;
7255     LocInfo = CCValAssign::Full;
7256   }
7257   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7258   return false;
7259 }
7260 
7261 template <typename ArgTy>
7262 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
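  // Find the index of the first mask argument (a vector with i1 element type),
  // if any; the calling convention pre-assigns it to V0.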
7263   for (const auto &ArgIdx : enumerate(Args)) {
7264     MVT ArgVT = ArgIdx.value().VT;
7265     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7266       return ArgIdx.index();
7267   }
7268   return None;
7269 }
7270 
7271 void RISCVTargetLowering::analyzeInputArgs(
7272     MachineFunction &MF, CCState &CCInfo,
7273     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7274     RISCVCCAssignFn Fn) const {
7275   unsigned NumArgs = Ins.size();
7276   FunctionType *FType = MF.getFunction().getFunctionType();
7277 
7278   Optional<unsigned> FirstMaskArgument;
7279   if (Subtarget.hasStdExtV())
7280     FirstMaskArgument = preAssignMask(Ins);
7281 
7282   for (unsigned i = 0; i != NumArgs; ++i) {
7283     MVT ArgVT = Ins[i].VT;
7284     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7285 
7286     Type *ArgTy = nullptr;
7287     if (IsRet)
7288       ArgTy = FType->getReturnType();
7289     else if (Ins[i].isOrigArg())
7290       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7291 
7292     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7293     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7294            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7295            FirstMaskArgument)) {
7296       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7297                         << EVT(ArgVT).getEVTString() << '\n');
7298       llvm_unreachable(nullptr);
7299     }
7300   }
7301 }
7302 
7303 void RISCVTargetLowering::analyzeOutputArgs(
7304     MachineFunction &MF, CCState &CCInfo,
7305     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7306     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7307   unsigned NumArgs = Outs.size();
7308 
7309   Optional<unsigned> FirstMaskArgument;
7310   if (Subtarget.hasStdExtV())
7311     FirstMaskArgument = preAssignMask(Outs);
7312 
7313   for (unsigned i = 0; i != NumArgs; i++) {
7314     MVT ArgVT = Outs[i].VT;
7315     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7316     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7317 
7318     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7319     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7320            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7321            FirstMaskArgument)) {
7322       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7323                         << EVT(ArgVT).getEVTString() << "\n");
7324       llvm_unreachable(nullptr);
7325     }
7326   }
7327 }
7328 
7329 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7330 // values.
7331 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7332                                    const CCValAssign &VA, const SDLoc &DL,
7333                                    const RISCVSubtarget &Subtarget) {
7334   switch (VA.getLocInfo()) {
7335   default:
7336     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7337   case CCValAssign::Full:
7338     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7339       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7340     break;
7341   case CCValAssign::BCvt:
7342     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7343       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7344     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7345       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7346     else
7347       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7348     break;
7349   }
7350   return Val;
7351 }
7352 
7353 // The caller is responsible for loading the full value if the argument is
7354 // passed with CCValAssign::Indirect.
7355 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7356                                 const CCValAssign &VA, const SDLoc &DL,
7357                                 const RISCVTargetLowering &TLI) {
7358   MachineFunction &MF = DAG.getMachineFunction();
7359   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7360   EVT LocVT = VA.getLocVT();
7361   SDValue Val;
7362   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7363   Register VReg = RegInfo.createVirtualRegister(RC);
7364   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7365   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7366 
7367   if (VA.getLocInfo() == CCValAssign::Indirect)
7368     return Val;
7369 
7370   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7371 }
7372 
7373 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7374                                    const CCValAssign &VA, const SDLoc &DL,
7375                                    const RISCVSubtarget &Subtarget) {
7376   EVT LocVT = VA.getLocVT();
7377 
7378   switch (VA.getLocInfo()) {
7379   default:
7380     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7381   case CCValAssign::Full:
7382     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7383       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7384     break;
7385   case CCValAssign::BCvt:
7386     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7387       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7388     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7389       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7390     else
7391       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7392     break;
7393   }
7394   return Val;
7395 }
7396 
7397 // The caller is responsible for loading the full value if the argument is
7398 // passed with CCValAssign::Indirect.
7399 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7400                                 const CCValAssign &VA, const SDLoc &DL) {
7401   MachineFunction &MF = DAG.getMachineFunction();
7402   MachineFrameInfo &MFI = MF.getFrameInfo();
7403   EVT LocVT = VA.getLocVT();
7404   EVT ValVT = VA.getValVT();
7405   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7406   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7407                                  /*Immutable=*/true);
7408   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7409   SDValue Val;
7410 
7411   ISD::LoadExtType ExtType;
7412   switch (VA.getLocInfo()) {
7413   default:
7414     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7415   case CCValAssign::Full:
7416   case CCValAssign::Indirect:
7417   case CCValAssign::BCvt:
7418     ExtType = ISD::NON_EXTLOAD;
7419     break;
7420   }
7421   Val = DAG.getExtLoad(
7422       ExtType, DL, LocVT, Chain, FIN,
7423       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7424   return Val;
7425 }
7426 
7427 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7428                                        const CCValAssign &VA, const SDLoc &DL) {
7429   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7430          "Unexpected VA");
7431   MachineFunction &MF = DAG.getMachineFunction();
7432   MachineFrameInfo &MFI = MF.getFrameInfo();
7433   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7434 
7435   if (VA.isMemLoc()) {
7436     // f64 is passed on the stack.
7437     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7438     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7439     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7440                        MachinePointerInfo::getFixedStack(MF, FI));
7441   }
7442 
7443   assert(VA.isRegLoc() && "Expected register VA assignment");
7444 
7445   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7446   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7447   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7448   SDValue Hi;
7449   if (VA.getLocReg() == RISCV::X17) {
7450     // Second half of f64 is passed on the stack.
7451     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7452     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7453     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7454                      MachinePointerInfo::getFixedStack(MF, FI));
7455   } else {
7456     // Second half of f64 is passed in another GPR.
7457     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7458     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7459     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7460   }
7461   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7462 }
7463 
// FastCC shows less than a 1% performance improvement on some particular
// benchmarks, but in theory it may be beneficial in some cases.
7466 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7467                             unsigned ValNo, MVT ValVT, MVT LocVT,
7468                             CCValAssign::LocInfo LocInfo,
7469                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7470                             bool IsFixed, bool IsRet, Type *OrigTy,
7471                             const RISCVTargetLowering &TLI,
7472                             Optional<unsigned> FirstMaskArgument) {
7473 
7474   // X5 and X6 might be used for save-restore libcall.
7475   static const MCPhysReg GPRList[] = {
7476       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7477       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7478       RISCV::X29, RISCV::X30, RISCV::X31};
7479 
7480   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7481     if (unsigned Reg = State.AllocateReg(GPRList)) {
7482       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7483       return false;
7484     }
7485   }
7486 
7487   if (LocVT == MVT::f16) {
7488     static const MCPhysReg FPR16List[] = {
7489         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7490         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7491         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7492         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7493     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7494       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7495       return false;
7496     }
7497   }
7498 
7499   if (LocVT == MVT::f32) {
7500     static const MCPhysReg FPR32List[] = {
7501         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7502         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7503         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7504         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7505     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7506       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7507       return false;
7508     }
7509   }
7510 
7511   if (LocVT == MVT::f64) {
7512     static const MCPhysReg FPR64List[] = {
7513         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7514         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7515         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7516         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7517     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7518       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7519       return false;
7520     }
7521   }
7522 
7523   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7524     unsigned Offset4 = State.AllocateStack(4, Align(4));
7525     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7526     return false;
7527   }
7528 
7529   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7530     unsigned Offset5 = State.AllocateStack(8, Align(8));
7531     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7532     return false;
7533   }
7534 
7535   if (LocVT.isVector()) {
7536     if (unsigned Reg =
7537             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7538       // Fixed-length vectors are located in the corresponding scalable-vector
7539       // container types.
7540       if (ValVT.isFixedLengthVector())
7541         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7542       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7543     } else {
      // Try to pass the address via a "fast" GPR.
7545       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7546         LocInfo = CCValAssign::Indirect;
7547         LocVT = TLI.getSubtarget().getXLenVT();
7548         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7549       } else if (ValVT.isFixedLengthVector()) {
7550         auto StackAlign =
7551             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7552         unsigned StackOffset =
7553             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7554         State.addLoc(
7555             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7556       } else {
7557         // Can't pass scalable vectors on the stack.
7558         return true;
7559       }
7560     }
7561 
7562     return false;
7563   }
7564 
7565   return true; // CC didn't match.
7566 }
7567 
7568 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7569                          CCValAssign::LocInfo LocInfo,
7570                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7571 
7572   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7573     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7574     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7575     static const MCPhysReg GPRList[] = {
7576         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7577         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7578     if (unsigned Reg = State.AllocateReg(GPRList)) {
7579       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7580       return false;
7581     }
7582   }
7583 
7584   if (LocVT == MVT::f32) {
7585     // Pass in STG registers: F1, ..., F6
7586     //                        fs0 ... fs5
7587     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7588                                           RISCV::F18_F, RISCV::F19_F,
7589                                           RISCV::F20_F, RISCV::F21_F};
7590     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7591       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7592       return false;
7593     }
7594   }
7595 
7596   if (LocVT == MVT::f64) {
7597     // Pass in STG registers: D1, ..., D6
7598     //                        fs6 ... fs11
7599     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7600                                           RISCV::F24_D, RISCV::F25_D,
7601                                           RISCV::F26_D, RISCV::F27_D};
7602     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7603       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7604       return false;
7605     }
7606   }
7607 
7608   report_fatal_error("No registers left in GHC calling convention");
7609   return true;
7610 }
7611 
7612 // Transform physical registers into virtual registers.
7613 SDValue RISCVTargetLowering::LowerFormalArguments(
7614     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7615     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7616     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7617 
7618   MachineFunction &MF = DAG.getMachineFunction();
7619 
7620   switch (CallConv) {
7621   default:
7622     report_fatal_error("Unsupported calling convention");
7623   case CallingConv::C:
7624   case CallingConv::Fast:
7625     break;
7626   case CallingConv::GHC:
7627     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7628         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7629       report_fatal_error(
7630         "GHC calling convention requires the F and D instruction set extensions");
7631   }
7632 
7633   const Function &Func = MF.getFunction();
7634   if (Func.hasFnAttribute("interrupt")) {
7635     if (!Func.arg_empty())
7636       report_fatal_error(
7637         "Functions with the interrupt attribute cannot have arguments!");
7638 
7639     StringRef Kind =
7640       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7641 
7642     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7643       report_fatal_error(
7644         "Function interrupt attribute argument not supported!");
7645   }
7646 
7647   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7648   MVT XLenVT = Subtarget.getXLenVT();
7649   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7651   std::vector<SDValue> OutChains;
7652 
7653   // Assign locations to all of the incoming arguments.
7654   SmallVector<CCValAssign, 16> ArgLocs;
7655   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7656 
7657   if (CallConv == CallingConv::GHC)
7658     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7659   else
7660     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7661                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7662                                                    : CC_RISCV);
7663 
7664   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7665     CCValAssign &VA = ArgLocs[i];
7666     SDValue ArgValue;
7667     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7668     // case.
7669     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7670       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7671     else if (VA.isRegLoc())
7672       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7673     else
7674       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7675 
7676     if (VA.getLocInfo() == CCValAssign::Indirect) {
7677       // If the original argument was split and passed by reference (e.g. i128
7678       // on RV32), we need to load all parts of it here (using the same
7679       // address). Vectors may be partly split to registers and partly to the
7680       // stack, in which case the base address is partly offset and subsequent
7681       // stores are relative to that.
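      // As a rough illustrative sketch: an i128 on RV32 arrives here as a
      // single pointer in ArgValue, and its legalized parts (which share one
      // OrigArgIndex) are loaded below from that pointer at their respective
      // PartOffsets.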
7682       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7683                                    MachinePointerInfo()));
7684       unsigned ArgIndex = Ins[i].OrigArgIndex;
7685       unsigned ArgPartOffset = Ins[i].PartOffset;
7686       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7687       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7688         CCValAssign &PartVA = ArgLocs[i + 1];
7689         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7690         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7691         if (PartVA.getValVT().isScalableVector())
7692           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7693         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7694         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7695                                      MachinePointerInfo()));
7696         ++i;
7697       }
7698       continue;
7699     }
7700     InVals.push_back(ArgValue);
7701   }
7702 
7703   if (IsVarArg) {
7704     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7705     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7706     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7707     MachineFrameInfo &MFI = MF.getFrameInfo();
7708     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7709     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7710 
7711     // Offset of the first variable argument from stack pointer, and size of
7712     // the vararg save area. For now, the varargs save area is either zero or
7713     // large enough to hold a0-a7.
7714     int VaArgOffset, VarArgsSaveSize;
7715 
7716     // If all registers are allocated, then all varargs must be passed on the
7717     // stack and we don't need to save any argregs.
7718     if (ArgRegs.size() == Idx) {
7719       VaArgOffset = CCInfo.getNextStackOffset();
7720       VarArgsSaveSize = 0;
7721     } else {
7722       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7723       VaArgOffset = -VarArgsSaveSize;
7724     }
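    // For example (an illustrative case): with XLEN == 32 and three named
    // arguments allocated to a0-a2, Idx == 3, so a3-a7 are saved,
    // VarArgsSaveSize == 4 * 5 == 20 and VaArgOffset == -20; the extra slot
    // created below then pads the save area to 24 bytes.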
7725 
    // Record the frame index of the first variable argument,
    // which is needed when lowering VASTART.
7728     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7729     RVFI->setVarArgsFrameIndex(FI);
7730 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // that offsets to even-numbered registers remain 2*XLEN-aligned.
7734     if (Idx % 2) {
7735       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7736       VarArgsSaveSize += XLenInBytes;
7737     }
7738 
7739     // Copy the integer registers that may have been used for passing varargs
7740     // to the vararg save area.
7741     for (unsigned I = Idx; I < ArgRegs.size();
7742          ++I, VaArgOffset += XLenInBytes) {
7743       const Register Reg = RegInfo.createVirtualRegister(RC);
7744       RegInfo.addLiveIn(ArgRegs[I], Reg);
7745       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7746       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7747       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7748       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7749                                    MachinePointerInfo::getFixedStack(MF, FI));
7750       cast<StoreSDNode>(Store.getNode())
7751           ->getMemOperand()
7752           ->setValue((Value *)nullptr);
7753       OutChains.push_back(Store);
7754     }
7755     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7756   }
7757 
  // All stores are grouped into one node so that the sizes of Ins and InVals
  // match. This only happens for vararg functions.
7760   if (!OutChains.empty()) {
7761     OutChains.push_back(Chain);
7762     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7763   }
7764 
7765   return Chain;
7766 }
7767 
7768 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7769 /// for tail call optimization.
7770 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7771 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7772     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7773     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7774 
7775   auto &Callee = CLI.Callee;
7776   auto CalleeCC = CLI.CallConv;
7777   auto &Outs = CLI.Outs;
7778   auto &Caller = MF.getFunction();
7779   auto CallerCC = Caller.getCallingConv();
7780 
7781   // Exception-handling functions need a special set of instructions to
7782   // indicate a return to the hardware. Tail-calling another function would
7783   // probably break this.
7784   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7785   // should be expanded as new function attributes are introduced.
7786   if (Caller.hasFnAttribute("interrupt"))
7787     return false;
7788 
  // Do not perform tail call optimization if the stack is used to pass
  // parameters.
7790   if (CCInfo.getNextStackOffset() != 0)
7791     return false;
7792 
  // Do not perform tail call optimization if any parameters need to be
  // passed indirectly. Since long doubles (fp128) and i128 are larger than
  // 2*XLEN, they are passed indirectly: their address is placed in a register
  // or, if no register is available, on the stack. Passing indirectly usually
  // also requires allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check alone is not enough; we also need
  // to check whether any CCValAssign in ArgLocs is CCValAssign::Indirect.
7801   for (auto &VA : ArgLocs)
7802     if (VA.getLocInfo() == CCValAssign::Indirect)
7803       return false;
7804 
7805   // Do not tail call opt if either caller or callee uses struct return
7806   // semantics.
7807   auto IsCallerStructRet = Caller.hasStructRetAttr();
7808   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7809   if (IsCallerStructRet || IsCalleeStructRet)
7810     return false;
7811 
7812   // Externally-defined functions with weak linkage should not be
7813   // tail-called. The behaviour of branch instructions in this situation (as
7814   // used for tail calls) is implementation-defined, so we cannot rely on the
7815   // linker replacing the tail call with a return.
7816   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7817     const GlobalValue *GV = G->getGlobal();
7818     if (GV->hasExternalWeakLinkage())
7819       return false;
7820   }
7821 
7822   // The callee has to preserve all registers the caller needs to preserve.
7823   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7824   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7825   if (CalleeCC != CallerCC) {
7826     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7827     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7828       return false;
7829   }
7830 
7831   // Byval parameters hand the function a pointer directly into the stack area
7832   // we want to reuse during a tail call. Working around this *is* possible
7833   // but less efficient and uglier in LowerCall.
7834   for (auto &Arg : Outs)
7835     if (Arg.Flags.isByVal())
7836       return false;
7837 
7838   return true;
7839 }
7840 
7841 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7842   return DAG.getDataLayout().getPrefTypeAlign(
7843       VT.getTypeForEVT(*DAG.getContext()));
7844 }
7845 
7846 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7847 // and output parameter nodes.
7848 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7849                                        SmallVectorImpl<SDValue> &InVals) const {
7850   SelectionDAG &DAG = CLI.DAG;
7851   SDLoc &DL = CLI.DL;
7852   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7853   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7854   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7855   SDValue Chain = CLI.Chain;
7856   SDValue Callee = CLI.Callee;
7857   bool &IsTailCall = CLI.IsTailCall;
7858   CallingConv::ID CallConv = CLI.CallConv;
7859   bool IsVarArg = CLI.IsVarArg;
7860   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7861   MVT XLenVT = Subtarget.getXLenVT();
7862 
7863   MachineFunction &MF = DAG.getMachineFunction();
7864 
7865   // Analyze the operands of the call, assigning locations to each operand.
7866   SmallVector<CCValAssign, 16> ArgLocs;
7867   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7868 
7869   if (CallConv == CallingConv::GHC)
7870     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7871   else
7872     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7873                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7874                                                     : CC_RISCV);
7875 
7876   // Check if it's really possible to do a tail call.
7877   if (IsTailCall)
7878     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7879 
7880   if (IsTailCall)
7881     ++NumTailCalls;
7882   else if (CLI.CB && CLI.CB->isMustTailCall())
7883     report_fatal_error("failed to perform tail call elimination on a call "
7884                        "site marked musttail");
7885 
7886   // Get a count of how many bytes are to be pushed on the stack.
7887   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7888 
7889   // Create local copies for byval args
7890   SmallVector<SDValue, 8> ByValArgs;
7891   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7892     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7893     if (!Flags.isByVal())
7894       continue;
7895 
7896     SDValue Arg = OutVals[i];
7897     unsigned Size = Flags.getByValSize();
7898     Align Alignment = Flags.getNonZeroByValAlign();
7899 
7900     int FI =
7901         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7902     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7903     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7904 
7905     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7906                           /*IsVolatile=*/false,
7907                           /*AlwaysInline=*/false, IsTailCall,
7908                           MachinePointerInfo(), MachinePointerInfo());
7909     ByValArgs.push_back(FIPtr);
7910   }
7911 
7912   if (!IsTailCall)
7913     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7914 
7915   // Copy argument values to their designated locations.
7916   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7917   SmallVector<SDValue, 8> MemOpChains;
7918   SDValue StackPtr;
7919   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7920     CCValAssign &VA = ArgLocs[i];
7921     SDValue ArgValue = OutVals[i];
7922     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7923 
7924     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7925     bool IsF64OnRV32DSoftABI =
7926         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7927     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7928       SDValue SplitF64 = DAG.getNode(
7929           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7930       SDValue Lo = SplitF64.getValue(0);
7931       SDValue Hi = SplitF64.getValue(1);
7932 
7933       Register RegLo = VA.getLocReg();
7934       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7935 
7936       if (RegLo == RISCV::X17) {
7937         // Second half of f64 is passed on the stack.
7938         // Work out the address of the stack slot.
7939         if (!StackPtr.getNode())
7940           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7941         // Emit the store.
7942         MemOpChains.push_back(
7943             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7944       } else {
7945         // Second half of f64 is passed in another GPR.
7946         assert(RegLo < RISCV::X31 && "Invalid register pair");
7947         Register RegHigh = RegLo + 1;
7948         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7949       }
7950       continue;
7951     }
7952 
7953     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7954     // as any other MemLoc.
7955 
7956     // Promote the value if needed.
7957     // For now, only handle fully promoted and indirect arguments.
7958     if (VA.getLocInfo() == CCValAssign::Indirect) {
7959       // Store the argument in a stack slot and pass its address.
7960       Align StackAlign =
7961           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7962                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7963       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7964       // If the original argument was split (e.g. i128), we need
7965       // to store the required parts of it here (and pass just one address).
7966       // Vectors may be partly split to registers and partly to the stack, in
7967       // which case the base address is partly offset and subsequent stores are
7968       // relative to that.
7969       unsigned ArgIndex = Outs[i].OrigArgIndex;
7970       unsigned ArgPartOffset = Outs[i].PartOffset;
7971       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We don't know exactly what will be
      // stored without walking the remaining parts in the loop below and
      // collecting that information.
7975       SmallVector<std::pair<SDValue, SDValue>> Parts;
7976       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7977         SDValue PartValue = OutVals[i + 1];
7978         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7979         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7980         EVT PartVT = PartValue.getValueType();
7981         if (PartVT.isScalableVector())
7982           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7983         StoredSize += PartVT.getStoreSize();
7984         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7985         Parts.push_back(std::make_pair(PartValue, Offset));
7986         ++i;
7987       }
7988       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7989       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7990       MemOpChains.push_back(
7991           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7992                        MachinePointerInfo::getFixedStack(MF, FI)));
7993       for (const auto &Part : Parts) {
7994         SDValue PartValue = Part.first;
7995         SDValue PartOffset = Part.second;
7996         SDValue Address =
7997             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7998         MemOpChains.push_back(
7999             DAG.getStore(Chain, DL, PartValue, Address,
8000                          MachinePointerInfo::getFixedStack(MF, FI)));
8001       }
8002       ArgValue = SpillSlot;
8003     } else {
8004       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
8005     }
8006 
8007     // Use local copy if it is a byval arg.
8008     if (Flags.isByVal())
8009       ArgValue = ByValArgs[j++];
8010 
8011     if (VA.isRegLoc()) {
8012       // Queue up the argument copies and emit them at the end.
8013       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
8014     } else {
8015       assert(VA.isMemLoc() && "Argument not register or memory");
8016       assert(!IsTailCall && "Tail call not allowed if stack is used "
8017                             "for passing parameters");
8018 
8019       // Work out the address of the stack slot.
8020       if (!StackPtr.getNode())
8021         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8022       SDValue Address =
8023           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8024                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8025 
8026       // Emit the store.
8027       MemOpChains.push_back(
8028           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8029     }
8030   }
8031 
8032   // Join the stores, which are independent of one another.
8033   if (!MemOpChains.empty())
8034     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8035 
8036   SDValue Glue;
8037 
8038   // Build a sequence of copy-to-reg nodes, chained and glued together.
8039   for (auto &Reg : RegsToPass) {
8040     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8041     Glue = Chain.getValue(1);
8042   }
8043 
  // Validate that none of the argument registers have been marked as
  // reserved; if one has, report an error. Do the same for the return address
  // register if this is not a tail call.
8047   validateCCReservedRegs(RegsToPass, MF);
8048   if (!IsTailCall &&
8049       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8050     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8051         MF.getFunction(),
8052         "Return address register required, but has been reserved."});
8053 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalization won't
  // split it, and so the direct call can then be matched by PseudoCALL.
8057   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8058     const GlobalValue *GV = S->getGlobal();
8059 
8060     unsigned OpFlags = RISCVII::MO_CALL;
8061     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8062       OpFlags = RISCVII::MO_PLT;
8063 
8064     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8065   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8066     unsigned OpFlags = RISCVII::MO_CALL;
8067 
8068     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8069                                                  nullptr))
8070       OpFlags = RISCVII::MO_PLT;
8071 
8072     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8073   }
8074 
8075   // The first call operand is the chain and the second is the target address.
8076   SmallVector<SDValue, 8> Ops;
8077   Ops.push_back(Chain);
8078   Ops.push_back(Callee);
8079 
8080   // Add argument registers to the end of the list so that they are
8081   // known live into the call.
8082   for (auto &Reg : RegsToPass)
8083     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8084 
8085   if (!IsTailCall) {
8086     // Add a register mask operand representing the call-preserved registers.
8087     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8088     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8089     assert(Mask && "Missing call preserved mask for calling convention");
8090     Ops.push_back(DAG.getRegisterMask(Mask));
8091   }
8092 
8093   // Glue the call to the argument copies, if any.
8094   if (Glue.getNode())
8095     Ops.push_back(Glue);
8096 
8097   // Emit the call.
8098   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8099 
8100   if (IsTailCall) {
8101     MF.getFrameInfo().setHasTailCall();
8102     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8103   }
8104 
8105   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8106   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8107   Glue = Chain.getValue(1);
8108 
8109   // Mark the end of the call, which is glued to the call itself.
8110   Chain = DAG.getCALLSEQ_END(Chain,
8111                              DAG.getConstant(NumBytes, DL, PtrVT, true),
8112                              DAG.getConstant(0, DL, PtrVT, true),
8113                              Glue, DL);
8114   Glue = Chain.getValue(1);
8115 
8116   // Assign locations to each value returned by this call.
8117   SmallVector<CCValAssign, 16> RVLocs;
8118   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8119   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8120 
8121   // Copy all of the result registers out of their specified physreg.
8122   for (auto &VA : RVLocs) {
8123     // Copy the value out
8124     SDValue RetValue =
8125         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
8126     // Glue the RetValue to the end of the call sequence
8127     Chain = RetValue.getValue(1);
8128     Glue = RetValue.getValue(2);
8129 
8130     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8131       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8132       SDValue RetValue2 =
8133           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8134       Chain = RetValue2.getValue(1);
8135       Glue = RetValue2.getValue(2);
8136       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8137                              RetValue2);
8138     }
8139 
8140     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8141 
8142     InVals.push_back(RetValue);
8143   }
8144 
8145   return Chain;
8146 }
8147 
8148 bool RISCVTargetLowering::CanLowerReturn(
8149     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8150     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8151   SmallVector<CCValAssign, 16> RVLocs;
8152   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8153 
8154   Optional<unsigned> FirstMaskArgument;
8155   if (Subtarget.hasStdExtV())
8156     FirstMaskArgument = preAssignMask(Outs);
8157 
8158   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8159     MVT VT = Outs[i].VT;
8160     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8161     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8162     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8163                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8164                  *this, FirstMaskArgument))
8165       return false;
8166   }
8167   return true;
8168 }
8169 
8170 SDValue
8171 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8172                                  bool IsVarArg,
8173                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
8174                                  const SmallVectorImpl<SDValue> &OutVals,
8175                                  const SDLoc &DL, SelectionDAG &DAG) const {
8176   const MachineFunction &MF = DAG.getMachineFunction();
8177   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8178 
8179   // Stores the assignment of the return value to a location.
8180   SmallVector<CCValAssign, 16> RVLocs;
8181 
8182   // Info about the registers and stack slot.
8183   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
8184                  *DAG.getContext());
8185 
8186   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
8187                     nullptr, CC_RISCV);
8188 
8189   if (CallConv == CallingConv::GHC && !RVLocs.empty())
8190     report_fatal_error("GHC functions return void only");
8191 
8192   SDValue Glue;
8193   SmallVector<SDValue, 4> RetOps(1, Chain);
8194 
8195   // Copy the result values into the output registers.
8196   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
8197     SDValue Val = OutVals[i];
8198     CCValAssign &VA = RVLocs[i];
8199     assert(VA.isRegLoc() && "Can only return in registers!");
8200 
8201     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8202       // Handle returning f64 on RV32D with a soft float ABI.
8203       assert(VA.isRegLoc() && "Expected return via registers");
8204       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
8205                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
8206       SDValue Lo = SplitF64.getValue(0);
8207       SDValue Hi = SplitF64.getValue(1);
8208       Register RegLo = VA.getLocReg();
8209       assert(RegLo < RISCV::X31 && "Invalid register pair");
8210       Register RegHi = RegLo + 1;
8211 
8212       if (STI.isRegisterReservedByUser(RegLo) ||
8213           STI.isRegisterReservedByUser(RegHi))
8214         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8215             MF.getFunction(),
8216             "Return value register required, but has been reserved."});
8217 
8218       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8219       Glue = Chain.getValue(1);
8220       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8221       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8222       Glue = Chain.getValue(1);
8223       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8224     } else {
8225       // Handle a 'normal' return.
8226       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8227       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8228 
8229       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8230         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8231             MF.getFunction(),
8232             "Return value register required, but has been reserved."});
8233 
8234       // Guarantee that all emitted copies are stuck together.
8235       Glue = Chain.getValue(1);
8236       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8237     }
8238   }
8239 
8240   RetOps[0] = Chain; // Update chain.
8241 
8242   // Add the glue node if we have it.
8243   if (Glue.getNode()) {
8244     RetOps.push_back(Glue);
8245   }
8246 
8247   unsigned RetOpc = RISCVISD::RET_FLAG;
8248   // Interrupt service routines use different return instructions.
8249   const Function &Func = DAG.getMachineFunction().getFunction();
8250   if (Func.hasFnAttribute("interrupt")) {
8251     if (!Func.getReturnType()->isVoidTy())
8252       report_fatal_error(
8253           "Functions with the interrupt attribute must have void return type!");
8254 
8255     MachineFunction &MF = DAG.getMachineFunction();
8256     StringRef Kind =
8257       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8258 
8259     if (Kind == "user")
8260       RetOpc = RISCVISD::URET_FLAG;
8261     else if (Kind == "supervisor")
8262       RetOpc = RISCVISD::SRET_FLAG;
8263     else
8264       RetOpc = RISCVISD::MRET_FLAG;
8265   }
8266 
8267   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8268 }
8269 
8270 void RISCVTargetLowering::validateCCReservedRegs(
8271     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8272     MachineFunction &MF) const {
8273   const Function &F = MF.getFunction();
8274   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8275 
8276   if (llvm::any_of(Regs, [&STI](auto Reg) {
8277         return STI.isRegisterReservedByUser(Reg.first);
8278       }))
8279     F.getContext().diagnose(DiagnosticInfoUnsupported{
8280         F, "Argument register required, but has been reserved."});
8281 }
8282 
8283 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8284   return CI->isTailCall();
8285 }
8286 
8287 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8288 #define NODE_NAME_CASE(NODE)                                                   \
8289   case RISCVISD::NODE:                                                         \
8290     return "RISCVISD::" #NODE;
8291   // clang-format off
8292   switch ((RISCVISD::NodeType)Opcode) {
8293   case RISCVISD::FIRST_NUMBER:
8294     break;
8295   NODE_NAME_CASE(RET_FLAG)
8296   NODE_NAME_CASE(URET_FLAG)
8297   NODE_NAME_CASE(SRET_FLAG)
8298   NODE_NAME_CASE(MRET_FLAG)
8299   NODE_NAME_CASE(CALL)
8300   NODE_NAME_CASE(SELECT_CC)
8301   NODE_NAME_CASE(BR_CC)
8302   NODE_NAME_CASE(BuildPairF64)
8303   NODE_NAME_CASE(SplitF64)
8304   NODE_NAME_CASE(TAIL)
8305   NODE_NAME_CASE(MULHSU)
8306   NODE_NAME_CASE(SLLW)
8307   NODE_NAME_CASE(SRAW)
8308   NODE_NAME_CASE(SRLW)
8309   NODE_NAME_CASE(DIVW)
8310   NODE_NAME_CASE(DIVUW)
8311   NODE_NAME_CASE(REMUW)
8312   NODE_NAME_CASE(ROLW)
8313   NODE_NAME_CASE(RORW)
8314   NODE_NAME_CASE(CLZW)
8315   NODE_NAME_CASE(CTZW)
8316   NODE_NAME_CASE(FSLW)
8317   NODE_NAME_CASE(FSRW)
8318   NODE_NAME_CASE(FSL)
8319   NODE_NAME_CASE(FSR)
8320   NODE_NAME_CASE(FMV_H_X)
8321   NODE_NAME_CASE(FMV_X_ANYEXTH)
8322   NODE_NAME_CASE(FMV_W_X_RV64)
8323   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8324   NODE_NAME_CASE(FCVT_W_RV64)
8325   NODE_NAME_CASE(FCVT_WU_RV64)
8326   NODE_NAME_CASE(READ_CYCLE_WIDE)
8327   NODE_NAME_CASE(GREV)
8328   NODE_NAME_CASE(GREVW)
8329   NODE_NAME_CASE(GORC)
8330   NODE_NAME_CASE(GORCW)
8331   NODE_NAME_CASE(SHFL)
8332   NODE_NAME_CASE(SHFLW)
8333   NODE_NAME_CASE(UNSHFL)
8334   NODE_NAME_CASE(UNSHFLW)
8335   NODE_NAME_CASE(BCOMPRESS)
8336   NODE_NAME_CASE(BCOMPRESSW)
8337   NODE_NAME_CASE(BDECOMPRESS)
8338   NODE_NAME_CASE(BDECOMPRESSW)
8339   NODE_NAME_CASE(VMV_V_X_VL)
8340   NODE_NAME_CASE(VFMV_V_F_VL)
8341   NODE_NAME_CASE(VMV_X_S)
8342   NODE_NAME_CASE(VMV_S_X_VL)
8343   NODE_NAME_CASE(VFMV_S_F_VL)
8344   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8345   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8346   NODE_NAME_CASE(READ_VLENB)
8347   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8348   NODE_NAME_CASE(VSLIDEUP_VL)
8349   NODE_NAME_CASE(VSLIDE1UP_VL)
8350   NODE_NAME_CASE(VSLIDEDOWN_VL)
8351   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8352   NODE_NAME_CASE(VID_VL)
8353   NODE_NAME_CASE(VFNCVT_ROD_VL)
8354   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8355   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8356   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8357   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8358   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8359   NODE_NAME_CASE(VECREDUCE_AND_VL)
8360   NODE_NAME_CASE(VECREDUCE_OR_VL)
8361   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8362   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8363   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8364   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8365   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8366   NODE_NAME_CASE(ADD_VL)
8367   NODE_NAME_CASE(AND_VL)
8368   NODE_NAME_CASE(MUL_VL)
8369   NODE_NAME_CASE(OR_VL)
8370   NODE_NAME_CASE(SDIV_VL)
8371   NODE_NAME_CASE(SHL_VL)
8372   NODE_NAME_CASE(SREM_VL)
8373   NODE_NAME_CASE(SRA_VL)
8374   NODE_NAME_CASE(SRL_VL)
8375   NODE_NAME_CASE(SUB_VL)
8376   NODE_NAME_CASE(UDIV_VL)
8377   NODE_NAME_CASE(UREM_VL)
8378   NODE_NAME_CASE(XOR_VL)
8379   NODE_NAME_CASE(FADD_VL)
8380   NODE_NAME_CASE(FSUB_VL)
8381   NODE_NAME_CASE(FMUL_VL)
8382   NODE_NAME_CASE(FDIV_VL)
8383   NODE_NAME_CASE(FNEG_VL)
8384   NODE_NAME_CASE(FABS_VL)
8385   NODE_NAME_CASE(FSQRT_VL)
8386   NODE_NAME_CASE(FMA_VL)
8387   NODE_NAME_CASE(FCOPYSIGN_VL)
8388   NODE_NAME_CASE(SMIN_VL)
8389   NODE_NAME_CASE(SMAX_VL)
8390   NODE_NAME_CASE(UMIN_VL)
8391   NODE_NAME_CASE(UMAX_VL)
8392   NODE_NAME_CASE(FMINNUM_VL)
8393   NODE_NAME_CASE(FMAXNUM_VL)
8394   NODE_NAME_CASE(MULHS_VL)
8395   NODE_NAME_CASE(MULHU_VL)
8396   NODE_NAME_CASE(FP_TO_SINT_VL)
8397   NODE_NAME_CASE(FP_TO_UINT_VL)
8398   NODE_NAME_CASE(SINT_TO_FP_VL)
8399   NODE_NAME_CASE(UINT_TO_FP_VL)
8400   NODE_NAME_CASE(FP_EXTEND_VL)
8401   NODE_NAME_CASE(FP_ROUND_VL)
8402   NODE_NAME_CASE(VWMUL_VL)
8403   NODE_NAME_CASE(VWMULU_VL)
8404   NODE_NAME_CASE(SETCC_VL)
8405   NODE_NAME_CASE(VSELECT_VL)
8406   NODE_NAME_CASE(VMAND_VL)
8407   NODE_NAME_CASE(VMOR_VL)
8408   NODE_NAME_CASE(VMXOR_VL)
8409   NODE_NAME_CASE(VMCLR_VL)
8410   NODE_NAME_CASE(VMSET_VL)
8411   NODE_NAME_CASE(VRGATHER_VX_VL)
8412   NODE_NAME_CASE(VRGATHER_VV_VL)
8413   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8414   NODE_NAME_CASE(VSEXT_VL)
8415   NODE_NAME_CASE(VZEXT_VL)
8416   NODE_NAME_CASE(VPOPC_VL)
8417   NODE_NAME_CASE(VLE_VL)
8418   NODE_NAME_CASE(VSE_VL)
8419   NODE_NAME_CASE(READ_CSR)
8420   NODE_NAME_CASE(WRITE_CSR)
8421   NODE_NAME_CASE(SWAP_CSR)
8422   }
8423   // clang-format on
8424   return nullptr;
8425 #undef NODE_NAME_CASE
8426 }
8427 
8428 /// getConstraintType - Given a constraint letter, return the type of
8429 /// constraint it is for this target.
8430 RISCVTargetLowering::ConstraintType
8431 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8432   if (Constraint.size() == 1) {
8433     switch (Constraint[0]) {
8434     default:
8435       break;
8436     case 'f':
8437     case 'v':
8438       return C_RegisterClass;
8439     case 'I':
8440     case 'J':
8441     case 'K':
8442       return C_Immediate;
8443     case 'A':
8444       return C_Memory;
8445     case 'S': // A symbolic address
8446       return C_Other;
8447     }
8448   }
8449   return TargetLowering::getConstraintType(Constraint);
8450 }
8451 
8452 std::pair<unsigned, const TargetRegisterClass *>
8453 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8454                                                   StringRef Constraint,
8455                                                   MVT VT) const {
8456   // First, see if this is a constraint that directly corresponds to a
8457   // RISCV register class.
8458   if (Constraint.size() == 1) {
8459     switch (Constraint[0]) {
8460     case 'r':
8461       return std::make_pair(0U, &RISCV::GPRRegClass);
8462     case 'f':
8463       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8464         return std::make_pair(0U, &RISCV::FPR16RegClass);
8465       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8466         return std::make_pair(0U, &RISCV::FPR32RegClass);
8467       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8468         return std::make_pair(0U, &RISCV::FPR64RegClass);
8469       break;
8470     case 'v':
8471       for (const auto *RC :
8472            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8473             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8474         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8475           return std::make_pair(0U, RC);
8476       }
8477       break;
8478     default:
8479       break;
8480     }
8481   }
8482 
8483   // Clang will correctly decode the usage of register name aliases into their
8484   // official names. However, other frontends like `rustc` do not. This allows
8485   // users of these frontends to use the ABI names for registers in LLVM-style
8486   // register constraints.
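  // For example, a constraint written as "{a0}" (rather than "{x10}") should
  // still select X10 below.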
8487   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8488                                .Case("{zero}", RISCV::X0)
8489                                .Case("{ra}", RISCV::X1)
8490                                .Case("{sp}", RISCV::X2)
8491                                .Case("{gp}", RISCV::X3)
8492                                .Case("{tp}", RISCV::X4)
8493                                .Case("{t0}", RISCV::X5)
8494                                .Case("{t1}", RISCV::X6)
8495                                .Case("{t2}", RISCV::X7)
8496                                .Cases("{s0}", "{fp}", RISCV::X8)
8497                                .Case("{s1}", RISCV::X9)
8498                                .Case("{a0}", RISCV::X10)
8499                                .Case("{a1}", RISCV::X11)
8500                                .Case("{a2}", RISCV::X12)
8501                                .Case("{a3}", RISCV::X13)
8502                                .Case("{a4}", RISCV::X14)
8503                                .Case("{a5}", RISCV::X15)
8504                                .Case("{a6}", RISCV::X16)
8505                                .Case("{a7}", RISCV::X17)
8506                                .Case("{s2}", RISCV::X18)
8507                                .Case("{s3}", RISCV::X19)
8508                                .Case("{s4}", RISCV::X20)
8509                                .Case("{s5}", RISCV::X21)
8510                                .Case("{s6}", RISCV::X22)
8511                                .Case("{s7}", RISCV::X23)
8512                                .Case("{s8}", RISCV::X24)
8513                                .Case("{s9}", RISCV::X25)
8514                                .Case("{s10}", RISCV::X26)
8515                                .Case("{s11}", RISCV::X27)
8516                                .Case("{t3}", RISCV::X28)
8517                                .Case("{t4}", RISCV::X29)
8518                                .Case("{t5}", RISCV::X30)
8519                                .Case("{t6}", RISCV::X31)
8520                                .Default(RISCV::NoRegister);
8521   if (XRegFromAlias != RISCV::NoRegister)
8522     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8523 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
8528   //
8529   // The second case is the ABI name of the register, so that frontends can also
8530   // use the ABI names in register constraint lists.
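  // For example, "{fa0}" and "{f10}" both map to F10_F below, and are promoted
  // to F10_D when the D extension is available.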
8531   if (Subtarget.hasStdExtF()) {
8532     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8533                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8534                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8535                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8536                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8537                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8538                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8539                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8540                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8541                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8542                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8543                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8544                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8545                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8546                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8547                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8548                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8549                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8550                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8551                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8552                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8553                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8554                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8555                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8556                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8557                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8558                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8559                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8560                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8561                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8562                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8563                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8564                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8565                         .Default(RISCV::NoRegister);
8566     if (FReg != RISCV::NoRegister) {
8567       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8568       if (Subtarget.hasStdExtD()) {
8569         unsigned RegNo = FReg - RISCV::F0_F;
8570         unsigned DReg = RISCV::F0_D + RegNo;
8571         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8572       }
8573       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8574     }
8575   }
8576 
8577   if (Subtarget.hasStdExtV()) {
8578     Register VReg = StringSwitch<Register>(Constraint.lower())
8579                         .Case("{v0}", RISCV::V0)
8580                         .Case("{v1}", RISCV::V1)
8581                         .Case("{v2}", RISCV::V2)
8582                         .Case("{v3}", RISCV::V3)
8583                         .Case("{v4}", RISCV::V4)
8584                         .Case("{v5}", RISCV::V5)
8585                         .Case("{v6}", RISCV::V6)
8586                         .Case("{v7}", RISCV::V7)
8587                         .Case("{v8}", RISCV::V8)
8588                         .Case("{v9}", RISCV::V9)
8589                         .Case("{v10}", RISCV::V10)
8590                         .Case("{v11}", RISCV::V11)
8591                         .Case("{v12}", RISCV::V12)
8592                         .Case("{v13}", RISCV::V13)
8593                         .Case("{v14}", RISCV::V14)
8594                         .Case("{v15}", RISCV::V15)
8595                         .Case("{v16}", RISCV::V16)
8596                         .Case("{v17}", RISCV::V17)
8597                         .Case("{v18}", RISCV::V18)
8598                         .Case("{v19}", RISCV::V19)
8599                         .Case("{v20}", RISCV::V20)
8600                         .Case("{v21}", RISCV::V21)
8601                         .Case("{v22}", RISCV::V22)
8602                         .Case("{v23}", RISCV::V23)
8603                         .Case("{v24}", RISCV::V24)
8604                         .Case("{v25}", RISCV::V25)
8605                         .Case("{v26}", RISCV::V26)
8606                         .Case("{v27}", RISCV::V27)
8607                         .Case("{v28}", RISCV::V28)
8608                         .Case("{v29}", RISCV::V29)
8609                         .Case("{v30}", RISCV::V30)
8610                         .Case("{v31}", RISCV::V31)
8611                         .Default(RISCV::NoRegister);
8612     if (VReg != RISCV::NoRegister) {
8613       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8614         return std::make_pair(VReg, &RISCV::VMRegClass);
8615       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8616         return std::make_pair(VReg, &RISCV::VRRegClass);
8617       for (const auto *RC :
8618            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8619         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8620           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8621           return std::make_pair(VReg, RC);
8622         }
8623       }
8624     }
8625   }
8626 
8627   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8628 }
8629 
8630 unsigned
8631 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8632   // Currently only support length 1 constraints.
8633   if (ConstraintCode.size() == 1) {
8634     switch (ConstraintCode[0]) {
8635     case 'A':
8636       return InlineAsm::Constraint_A;
8637     default:
8638       break;
8639     }
8640   }
8641 
8642   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8643 }
8644 
8645 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8646     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8647     SelectionDAG &DAG) const {
8648   // Currently only support length 1 constraints.
8649   if (Constraint.length() == 1) {
8650     switch (Constraint[0]) {
8651     case 'I':
8652       // Validate & create a 12-bit signed immediate operand.
8653       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8654         uint64_t CVal = C->getSExtValue();
8655         if (isInt<12>(CVal))
8656           Ops.push_back(
8657               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8658       }
8659       return;
8660     case 'J':
8661       // Validate & create an integer zero operand.
8662       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8663         if (C->getZExtValue() == 0)
8664           Ops.push_back(
8665               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8666       return;
8667     case 'K':
8668       // Validate & create a 5-bit unsigned immediate operand.
8669       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8670         uint64_t CVal = C->getZExtValue();
8671         if (isUInt<5>(CVal))
8672           Ops.push_back(
8673               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8674       }
8675       return;
8676     case 'S':
8677       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
8678         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
8679                                                  GA->getValueType(0)));
8680       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
8681         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
8682                                                 BA->getValueType(0)));
8683       }
8684       return;
8685     default:
8686       break;
8687     }
8688   }
8689   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8690 }
8691 
8692 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8693                                                    Instruction *Inst,
8694                                                    AtomicOrdering Ord) const {
8695   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8696     return Builder.CreateFence(Ord);
8697   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8698     return Builder.CreateFence(AtomicOrdering::Release);
8699   return nullptr;
8700 }
8701 
8702 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8703                                                     Instruction *Inst,
8704                                                     AtomicOrdering Ord) const {
8705   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8706     return Builder.CreateFence(AtomicOrdering::Acquire);
8707   return nullptr;
8708 }
8709 
8710 TargetLowering::AtomicExpansionKind
8711 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8712   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8713   // point operations can't be used in an lr/sc sequence without breaking the
8714   // forward-progress guarantee.
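  // For instance, an atomicrmw fadd on f32 is expected to be expanded by
  // AtomicExpandPass into a compare-exchange loop rather than an LR/SC
  // sequence.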
8715   if (AI->isFloatingPointOperation())
8716     return AtomicExpansionKind::CmpXChg;
8717 
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

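  // The i64 intrinsic variants take their i32 operands sign-extended to
  // 64 bits, matching the canonical form of i32 values in RV64 registers.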
  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
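  // For example, with XLen == 32 and an i8 field at bit 16 (ShiftAmt == 16,
  // ValWidth == 8), SextShamt == 32 - 16 - 8 == 8: shifting left and then
  // arithmetically right by 8 sign-extends the byte in place for the compare.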
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress the unnecessary extensions if the libcall
  // argument or return value is an f32 type for the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL to a SLLI and an ADD/SUB.
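      // e.g. x * 9 == (x << 3) + x, x * 7 == (x << 3) - x, and
      // x * -7 == x - (x << 3).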
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
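      // e.g. 4608 == 9 << 9 does not fit in a 12-bit immediate, but
      // x * 4608 == ((x << 3) + x) << 9, which avoids materializing the
      // constant with an LUI/ADDI pair.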
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

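  // Vector accesses are treated as legal (and fast) as long as they are
  // aligned to the element type, since RVV memory operations work on
  // individual elements.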
  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // make a NaN-boxed f32, and bitcast the result to f32.
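    // e.g. half 1.0 (0x3C00) becomes the f32 bit pattern 0xFFFF3C00, the
    // NaN-boxed form expected when an f16 is passed in an f32 register.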
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
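    // If the part type is a whole multiple of the value type, widen the value
    // to the part type by inserting it at index 0 of an undef PartVT vector
    // (e.g. an nxv1i32 value handed back in an nxv2i32 part register).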
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types differ, first bitcast the value to a vector with
      // PartVT's element type.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
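    // Inverse of the widening done in splitValueIntoRegisterParts: the value
    // lives in the low elements of the wider part register, so extract it
    // back out, bitcasting if the element types differ.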
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types differ, first extract a subvector with PartVT's
      // element type, then bitcast it back to ValueVT below.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
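  // Registers accessed by name (e.g. through llvm.read_register or named
  // register globals) must be reserved, either architecturally or by the user
  // (e.g. with -ffixed-<reg>), so the register allocator never reuses them.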
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm