//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

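  // Candidate scalable vector types: in MVT naming, nxvNiM / nxvNfM denotes
  // <vscale x N x iM> / <vscale x N x fM>; e.g. MVT::nxv4i32 is
  // <vscale x 4 x i32>.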
  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasStdExtV()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };
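    // For example: nxv1i64 and nxv2i32 have a 64-bit known minimum size and
    // are placed in VR, while nxv8i32 (256 bits minimum) is placed in VRM4,
    // a group of four architectural vector registers.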

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs)
      addRegClassForRVV(VT);

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);

    setOperationAction(ISD::UADDO, MVT::i32, Custom);
    setOperationAction(ISD::USUBO, MVT::i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, MVT::i32, Custom);
      setOperationAction(ISD::MUL, MVT::i128, Custom);

      setOperationAction(ISD::SDIV, MVT::i8, Custom);
      setOperationAction(ISD::UDIV, MVT::i8, Custom);
      setOperationAction(ISD::UREM, MVT::i8, Custom);
      setOperationAction(ISD::SDIV, MVT::i16, Custom);
      setOperationAction(ISD::UDIV, MVT::i16, Custom);
      setOperationAction(ISD::UREM, MVT::i16, Custom);
      setOperationAction(ISD::SDIV, MVT::i32, Custom);
      setOperationAction(ISD::UDIV, MVT::i32, Custom);
      setOperationAction(ISD::UREM, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
    setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
    setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
    // BSWAP i8 doesn't exist.
    setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
    setOperationAction(ISD::BSWAP,      MVT::i16, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       Subtarget.hasStdExtZbb() ? Legal : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i32, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::LRINT, MVT::f16, Legal);
    setOperationAction(ISD::LLRINT, MVT::f16, Legal);
    setOperationAction(ISD::LROUND, MVT::f16, Legal);
    setOperationAction(ISD::LLROUND, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::LRINT, MVT::f32, Legal);
    setOperationAction(ISD::LLRINT, MVT::f32, Legal);
    setOperationAction(ISD::LROUND, MVT::f32, Legal);
    setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::LRINT, MVT::f64, Legal);
    setOperationAction(ISD::LLRINT, MVT::f64, Legal);
    setOperationAction(ISD::LROUND, MVT::f64, Legal);
    setOperationAction(ISD::LLROUND, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

    static unsigned IntegerVPOps[] = {
        ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
        ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
        ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};

    static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
                                            ISD::VP_FMUL, ISD::VP_FDIV};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes.
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);

      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    for (MVT VT : IntVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      for (unsigned VPOpc : IntegerVPOps)
        setOperationAction(VPOpc, VT, Custom);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::STEP_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };
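    // For example, (setcc x, y, setogt) is expanded by swapping operands to
    // (setcc y, x, setolt); isel then matches that back to the native
    // greater-than form with operands swapped once more, so one small set of
    // patterns covers the vv, vf and fv variants.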

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (unsigned VPOpc : FloatingPointVPOps)
        setOperationAction(VPOpc, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasStdExtF())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasStdExtD())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        // Operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        // Use SPLAT_VECTOR to prevent type legalization from destroying the
        // splats when type legalizing i64 scalar on RV32.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS,  VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SADDSAT, VT, Custom);
        setOperationAction(ISD::UADDSAT, VT, Custom);
        setOperationAction(ISD::SSUBSAT, VT, Custom);
        setOperationAction(ISD::USUBSAT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

        for (unsigned VPOpc : IntegerVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FCOPYSIGN, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
        setOperationAction(ISD::FMINNUM, VT, Custom);
        setOperationAction(ISD::FMAXNUM, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

        for (unsigned VPOpc : FloatingPointVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, MVT::i8, Custom);
      setOperationAction(ISD::BITCAST, MVT::i16, Custom);
      setOperationAction(ISD::BITCAST, MVT::i32, Custom);
      setOperationAction(ISD::BITCAST, MVT::i64, Custom);
      setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  // We can use any register for comparisons.
  setHasMultipleConditionRegisters();

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  if (Subtarget.hasStdExtV()) {
    setTargetDAGCombine(ISD::FCOPYSIGN);
    setTargetDAGCombine(ISD::MGATHER);
    setTargetDAGCombine(ISD::MSCATTER);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SHL);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}
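// For example, a setcc on nxv4i32 produces an nxv4i1 mask when V is enabled,
// while a scalar comparison produces an XLenVT (i32 or i64) result.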

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}
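// For example, (base + 2047) is a legal mode since 2047 fits in the 12-bit
// signed immediate of loads and stores, whereas (base + 2048) or reg+reg
// ("r+r") addressing must be materialized with a separate add.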

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}
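// For example, (zext i32 (load i8)) is free because it can be selected as
// lbu, which zero-extends; likewise lhu for i16 and, on RV64, lwu for i32.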

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}
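// On RV64, i32 values are conventionally kept sign-extended in registers: the
// W-suffixed instructions sign-extend their results for free, whereas a zero
// extension generally costs a shift pair (slli+srli).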

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}
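// In other words, only +0.0 is treated as a legal FP immediate: it can be
// materialized with a single fmv from x0, while -0.0 and other constants need
// a load or an integer materialization sequence.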

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return MVT::f32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return 1;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}
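// For example, (setcc X, -1, setgt) becomes (setcc X, 0, setge), which a
// branch can implement as bge X, zero; (setcc X, Y, setle) is swapped to
// (setcc Y, X, setge) since the ISA provides blt/bge (and their unsigned
// variants) but no ble/bgt.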

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVII::VLMUL::LMUL_F8;
  case 16:
    return RISCVII::VLMUL::LMUL_F4;
  case 32:
    return RISCVII::VLMUL::LMUL_F2;
  case 64:
    return RISCVII::VLMUL::LMUL_1;
  case 128:
    return RISCVII::VLMUL::LMUL_2;
  case 256:
    return RISCVII::VLMUL::LMUL_4;
  case 512:
    return RISCVII::VLMUL::LMUL_8;
  }
}
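// For example, with RVVBitsPerBlock == 64: nxv1i32 (32-bit known minimum
// size) maps to LMUL_F2 and nxv4i64 (256 bits) maps to LMUL_4. Mask types are
// scaled as if their elements were 8 bits wide, so nxv8i1 is treated as 64
// bits and maps to LMUL_1.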

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
      LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 ||
      LMUL == RISCVII::VLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}
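// For example, extracting nxv1i32 at index 3 from nxv16i32 composes down to
// the subregister for the second VR register in the group, with a leftover
// element index of 1 that the caller must still handle within that VR
// register (e.g. with a slide-down).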

// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
// stores for those types.
bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
  return !Subtarget.useRVVForFixedLengthVectors() ||
         (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
}

static bool useRVVForFixedLengthVectorVT(MVT VT,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  // We only support a set of vector types with a consistent maximum fixed size
  // across all supported vector element types to avoid legalization issues.
  // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
  // fixed-length vector type we support is 1024 bytes.
  if (VT.getFixedSizeInBits() > 1024 * 8)
    return false;

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  MVT EltVT = VT.getVectorElementType();

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (EltVT.SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > MinVLen)
      return false;
    MinVLen /= 8;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    break;
  case MVT::f16:
    if (!Subtarget.hasStdExtZfh())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasStdExtF())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasStdExtD())
      return false;
    break;
  }

  // Reject elements larger than ELEN.
  if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
    return false;

  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}
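// For example, with a minimum RVV vector size of 128 bits, v8i32 (256 bits)
// computes LMul = 2 and is accepted provided that does not exceed the
// configured LMUL limit, while v3i32 is rejected by the final
// power-of-two-element restriction.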

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element
// type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are setup.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
  unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN sized types. Use fractional LMULs for
    // narrower types. The smallest fractional LMUL we support is 8/ELEN.
    // Within each fractional LMUL we support SEW between 8 and LMUL*ELEN.
    unsigned NumElts =
        (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
    NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
    assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}
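// For example, with MinVLen == 128, v4i32 maps to an nxv2i32 container
// (NumElts = (4 * 64) / 128 = 2), so one fixed-length v4i32 value occupies
// exactly one vector register when VLEN is 128.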

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
  return {Mask, VL};
}
1345 
1346 // As above but assuming the given type is a scalable vector type.
1347 static std::pair<SDValue, SDValue>
1348 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1349                         const RISCVSubtarget &Subtarget) {
1350   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1351   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1352 }
1353 
// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either are (currently) supported. This can get us into an infinite loop
1356 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1357 // as a ..., etc.
1358 // Until either (or both) of these can reliably lower any node, reporting that
1359 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1360 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1361 // which is not desirable.
1362 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1363     EVT VT, unsigned DefinedValues) const {
1364   return false;
1365 }
1366 
1367 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1368   // Only splats are currently supported.
1369   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1370     return true;
1371 
1372   return false;
1373 }
1374 
1375 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
  // RISCV FP-to-int conversions saturate to the destination register size, but
  // don't produce 0 for NaN. We can use a conversion instruction and fix the
  // NaN case with a compare and a select.
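  // That is, the result is (Src != Src) ? 0 : fcvt_rtz(Src), using an
  // unordered compare (SETUO) to detect NaN.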
1379   SDValue Src = Op.getOperand(0);
1380 
1381   EVT DstVT = Op.getValueType();
1382   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1383 
1384   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1385   unsigned Opc;
1386   if (SatVT == DstVT)
1387     Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
1388   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1389     Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
1390   else
1391     return SDValue();
1392   // FIXME: Support other SatVTs by clamping before or after the conversion.
1393 
1394   SDLoc DL(Op);
1395   SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);
1396 
1397   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1398   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1399 }
1400 
1401 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1402                                  const RISCVSubtarget &Subtarget) {
1403   MVT VT = Op.getSimpleValueType();
1404   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1405 
1406   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1407 
1408   SDLoc DL(Op);
1409   SDValue Mask, VL;
1410   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1411 
1412   unsigned Opc =
1413       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1414   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1415   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1416 }
1417 
1418 struct VIDSequence {
1419   int64_t StepNumerator;
1420   unsigned StepDenominator;
1421   int64_t Addend;
1422 };
1423 
1424 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
1426 // RVV sequence (VID * S) + X, for example.
1427 // The step S is represented as an integer numerator divided by a positive
1428 // denominator. Note that the implementation currently only identifies
1429 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1430 // cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; it is left to the caller to determine
// whether this is worth generating code for.
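// For example, <i32 3, i32 5, i32 7, i32 9> yields StepNumerator=2,
// StepDenominator=1 and Addend=3, while <i32 0, i32 0, i32 1, i32 1> yields
// StepNumerator=1, StepDenominator=2 and Addend=0.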
1434 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1435   unsigned NumElts = Op.getNumOperands();
1436   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1437   if (!Op.getValueType().isInteger())
1438     return None;
1439 
1440   Optional<unsigned> SeqStepDenom;
1441   Optional<int64_t> SeqStepNum, SeqAddend;
1442   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1443   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1444   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1445     // Assume undef elements match the sequence; we just have to be careful
1446     // when interpolating across them.
1447     if (Op.getOperand(Idx).isUndef())
1448       continue;
1449     // The BUILD_VECTOR must be all constants.
1450     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1451       return None;
1452 
1453     uint64_t Val = Op.getConstantOperandVal(Idx) &
1454                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1455 
1456     if (PrevElt) {
1457       // Calculate the step since the last non-undef element, and ensure
1458       // it's consistent across the entire sequence.
1459       unsigned IdxDiff = Idx - PrevElt->second;
1460       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1461 
      // A zero value difference means that we're somewhere in the middle
1463       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1464       // step change before evaluating the sequence.
1465       if (ValDiff != 0) {
1466         int64_t Remainder = ValDiff % IdxDiff;
1467         // Normalize the step if it's greater than 1.
1468         if (Remainder != ValDiff) {
1469           // The difference must cleanly divide the element span.
1470           if (Remainder != 0)
1471             return None;
1472           ValDiff /= IdxDiff;
1473           IdxDiff = 1;
1474         }
1475 
1476         if (!SeqStepNum)
1477           SeqStepNum = ValDiff;
1478         else if (ValDiff != SeqStepNum)
1479           return None;
1480 
1481         if (!SeqStepDenom)
1482           SeqStepDenom = IdxDiff;
1483         else if (IdxDiff != *SeqStepDenom)
1484           return None;
1485       }
1486     }
1487 
1488     // Record and/or check any addend.
1489     if (SeqStepNum && SeqStepDenom) {
1490       uint64_t ExpectedVal =
1491           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1492       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1493       if (!SeqAddend)
1494         SeqAddend = Addend;
1495       else if (SeqAddend != Addend)
1496         return None;
1497     }
1498 
1499     // Record this non-undef element for later.
1500     if (!PrevElt || PrevElt->first != Val)
1501       PrevElt = std::make_pair(Val, Idx);
1502   }
1503   // We need to have logged both a step and an addend for this to count as
1504   // a legal index sequence.
1505   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1506     return None;
1507 
1508   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1509 }
1510 
1511 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1512                                  const RISCVSubtarget &Subtarget) {
1513   MVT VT = Op.getSimpleValueType();
1514   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1515 
1516   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1517 
1518   SDLoc DL(Op);
1519   SDValue Mask, VL;
1520   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1521 
1522   MVT XLenVT = Subtarget.getXLenVT();
1523   unsigned NumElts = Op.getNumOperands();
1524 
1525   if (VT.getVectorElementType() == MVT::i1) {
1526     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1527       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1528       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1529     }
1530 
1531     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1532       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1533       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1534     }
1535 
1536     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1537     // scalar integer chunks whose bit-width depends on the number of mask
1538     // bits and XLEN.
1539     // First, determine the most appropriate scalar integer type to use. This
1540     // is at most XLenVT, but may be shrunk to a smaller vector element type
1541     // according to the size of the final vector - use i8 chunks rather than
1542     // XLenVT if we're producing a v8i1. This results in more consistent
1543     // codegen across RV32 and RV64.
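    // For example, a v4i1 mask uses a single i8 chunk (v1i8), while a v64i1
    // mask on RV32 uses two i32 chunks (v2i32).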
1544     unsigned NumViaIntegerBits =
1545         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1546     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1547       // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
1549       // such a case. We can use a load from a constant pool in this case.
1550       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1551         return SDValue();
1552       // Now we can create our integer vector type. Note that it may be larger
1553       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1554       MVT IntegerViaVecVT =
1555           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1556                            divideCeil(NumElts, NumViaIntegerBits));
1557 
1558       uint64_t Bits = 0;
1559       unsigned BitPos = 0, IntegerEltIdx = 0;
1560       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1561 
1562       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1563         // Once we accumulate enough bits to fill our scalar type, insert into
1564         // our vector and clear our accumulated data.
1565         if (I != 0 && I % NumViaIntegerBits == 0) {
1566           if (NumViaIntegerBits <= 32)
1567             Bits = SignExtend64(Bits, 32);
1568           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1569           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1570                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1571           Bits = 0;
1572           BitPos = 0;
1573           IntegerEltIdx++;
1574         }
1575         SDValue V = Op.getOperand(I);
1576         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1577         Bits |= ((uint64_t)BitValue << BitPos);
1578       }
1579 
1580       // Insert the (remaining) scalar value into position in our integer
1581       // vector type.
1582       if (NumViaIntegerBits <= 32)
1583         Bits = SignExtend64(Bits, 32);
1584       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1585       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1586                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1587 
1588       if (NumElts < NumViaIntegerBits) {
1589         // If we're producing a smaller vector than our minimum legal integer
1590         // type, bitcast to the equivalent (known-legal) mask type, and extract
1591         // our final mask.
1592         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1593         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1594         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1595                           DAG.getConstant(0, DL, XLenVT));
1596       } else {
1597         // Else we must have produced an integer type with the same size as the
1598         // mask type; bitcast for the final result.
1599         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1600         Vec = DAG.getBitcast(VT, Vec);
1601       }
1602 
1603       return Vec;
1604     }
1605 
1606     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1607     // vector type, we have a legal equivalently-sized i8 type, so we can use
1608     // that.
1609     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1610     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1611 
1612     SDValue WideVec;
1613     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1614       // For a splat, perform a scalar truncate before creating the wider
1615       // vector.
1616       assert(Splat.getValueType() == XLenVT &&
1617              "Unexpected type for i1 splat value");
1618       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1619                           DAG.getConstant(1, DL, XLenVT));
1620       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1621     } else {
1622       SmallVector<SDValue, 8> Ops(Op->op_values());
1623       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1624       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1625       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1626     }
1627 
1628     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1629   }
1630 
1631   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1632     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1633                                         : RISCVISD::VMV_V_X_VL;
1634     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1635     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1636   }
1637 
1638   // Try and match index sequences, which we can lower to the vid instruction
1639   // with optional modifications. An all-undef vector is matched by
1640   // getSplatValue, above.
1641   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1642     int64_t StepNumerator = SimpleVID->StepNumerator;
1643     unsigned StepDenominator = SimpleVID->StepDenominator;
1644     int64_t Addend = SimpleVID->Addend;
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
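    // For example, <i32 1, i32 3, i32 5, i32 7> has StepNumerator=2,
    // StepDenominator=1 and Addend=1, and is lowered as (vid.v << 1) + 1.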
1647     if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
1648         isInt<5>(Addend)) {
1649       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1650       // Convert right out of the scalable type so we can use standard ISD
1651       // nodes for the rest of the computation. If we used scalable types with
1652       // these, we'd lose the fixed-length vector info and generate worse
1653       // vsetvli code.
1654       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1655       assert(StepNumerator != 0 && "Invalid step");
1656       bool Negate = false;
1657       if (StepNumerator != 1) {
1658         int64_t SplatStepVal = StepNumerator;
1659         unsigned Opcode = ISD::MUL;
1660         if (isPowerOf2_64(std::abs(StepNumerator))) {
1661           Negate = StepNumerator < 0;
1662           Opcode = ISD::SHL;
1663           SplatStepVal = Log2_64(std::abs(StepNumerator));
1664         }
1665         SDValue SplatStep = DAG.getSplatVector(
1666             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1667         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1668       }
1669       if (StepDenominator != 1) {
1670         SDValue SplatStep = DAG.getSplatVector(
1671             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
1672         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
1673       }
1674       if (Addend != 0 || Negate) {
1675         SDValue SplatAddend =
1676             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend,
                          VID);
1678       }
1679       return VID;
1680     }
1681   }
1682 
1683   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1684   // when re-interpreted as a vector with a larger element type. For example,
1685   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1686   // could be instead splat as
1687   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1688   // TODO: This optimization could also work on non-constant splats, but it
1689   // would require bit-manipulation instructions to construct the splat value.
1690   SmallVector<SDValue> Sequence;
1691   unsigned EltBitSize = VT.getScalarSizeInBits();
1692   const auto *BV = cast<BuildVectorSDNode>(Op);
1693   if (VT.isInteger() && EltBitSize < 64 &&
1694       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1695       BV->getRepeatedSequence(Sequence) &&
1696       (Sequence.size() * EltBitSize) <= 64) {
1697     unsigned SeqLen = Sequence.size();
1698     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1699     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1700     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1701             ViaIntVT == MVT::i64) &&
1702            "Unexpected sequence type");
1703 
1704     unsigned EltIdx = 0;
1705     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1706     uint64_t SplatValue = 0;
1707     // Construct the amalgamated value which can be splatted as this larger
1708     // vector type.
1709     for (const auto &SeqV : Sequence) {
1710       if (!SeqV.isUndef())
1711         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1712                        << (EltIdx * EltBitSize));
1713       EltIdx++;
1714     }
1715 
    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1718     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1719       SplatValue = SignExtend64(SplatValue, 32);
1720 
1721     // Since we can't introduce illegal i64 types at this stage, we can only
1722     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1723     // way we can use RVV instructions to splat.
1724     assert((ViaIntVT.bitsLE(XLenVT) ||
1725             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1726            "Unexpected bitcast sequence");
1727     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1728       SDValue ViaVL =
1729           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1730       MVT ViaContainerVT =
1731           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1732       SDValue Splat =
1733           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1734                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1735       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1736       return DAG.getBitcast(VT, Splat);
1737     }
1738   }
1739 
1740   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1741   // which constitute a large proportion of the elements. In such cases we can
1742   // splat a vector with the dominant element and make up the shortfall with
1743   // INSERT_VECTOR_ELTs.
1744   // Note that this includes vectors of 2 elements by association. The
1745   // upper-most element is the "dominant" one, allowing us to use a splat to
1746   // "insert" the upper element, and an insert of the lower element at position
1747   // 0, which improves codegen.
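  // For example, <i32 0, i32 1, i32 0, i32 0> is lowered as a splat of 0
  // followed by an insert of 1 at index 1.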
1748   SDValue DominantValue;
1749   unsigned MostCommonCount = 0;
1750   DenseMap<SDValue, unsigned> ValueCounts;
1751   unsigned NumUndefElts =
1752       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1753 
1754   // Track the number of scalar loads we know we'd be inserting, estimated as
1755   // any non-zero floating-point constant. Other kinds of element are either
1756   // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materialization and
1758   // vector-insertion instructions is not known.
1759   unsigned NumScalarLoads = 0;
1760 
1761   for (SDValue V : Op->op_values()) {
1762     if (V.isUndef())
1763       continue;
1764 
1765     ValueCounts.insert(std::make_pair(V, 0));
1766     unsigned &Count = ValueCounts[V];
1767 
1768     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
1769       NumScalarLoads += !CFP->isExactlyValue(+0.0);
1770 
1771     // Is this value dominant? In case of a tie, prefer the highest element as
1772     // it's cheaper to insert near the beginning of a vector than it is at the
1773     // end.
1774     if (++Count >= MostCommonCount) {
1775       DominantValue = V;
1776       MostCommonCount = Count;
1777     }
1778   }
1779 
1780   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1781   unsigned NumDefElts = NumElts - NumUndefElts;
1782   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1783 
1784   // Don't perform this optimization when optimizing for size, since
1785   // materializing elements and inserting them tends to cause code bloat.
1786   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
1787       ((MostCommonCount > DominantValueCountThreshold) ||
1788        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1789     // Start by splatting the most common element.
1790     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1791 
1792     DenseSet<SDValue> Processed{DominantValue};
1793     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1794     for (const auto &OpIdx : enumerate(Op->ops())) {
1795       const SDValue &V = OpIdx.value();
1796       if (V.isUndef() || !Processed.insert(V).second)
1797         continue;
1798       if (ValueCounts[V] == 1) {
1799         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1800                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1801       } else {
1802         // Blend in all instances of this value using a VSELECT, using a
1803         // mask where each bit signals whether that element is the one
1804         // we're after.
1805         SmallVector<SDValue> Ops;
1806         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1807           return DAG.getConstant(V == V1, DL, XLenVT);
1808         });
1809         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1810                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1811                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1812       }
1813     }
1814 
1815     return Vec;
1816   }
1817 
1818   return SDValue();
1819 }
1820 
1821 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1822                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1823   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1824     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1825     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign bit of Lo replicated (i.e. the pair
    // is the sign extension of Lo), lower this as a custom node in order to
    // try and match RVV vector/scalar instructions.
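    // For example, Lo=0xFFFFFFF5, Hi=0xFFFFFFFF splats the i64 value -11
    // using a single vmv.v.x of Lo.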
1828     if ((LoC >> 31) == HiC)
1829       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1830   }
1831 
1832   // Fall back to a stack store and stride x0 vector load.
1833   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1834 }
1835 
1836 // Called by type legalization to handle splat of i64 on RV32.
1837 // FIXME: We can optimize this when the type has sign or zero bits in one
1838 // of the halves.
1839 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1840                                    SDValue VL, SelectionDAG &DAG) {
1841   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1842   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1843                            DAG.getConstant(0, DL, MVT::i32));
1844   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1845                            DAG.getConstant(1, DL, MVT::i32));
1846   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1847 }
1848 
// This function lowers a splat of a scalar operand Scalar with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
1852 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1853                                 SelectionDAG &DAG,
1854                                 const RISCVSubtarget &Subtarget) {
1855   if (VT.isFloatingPoint())
1856     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1857 
1858   MVT XLenVT = Subtarget.getXLenVT();
1859 
1860   // Simplest case is that the operand needs to be promoted to XLenVT.
1861   if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
1866     unsigned ExtOpc =
1867         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1868     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1869     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1870   }
1871 
1872   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1873          "Unexpected scalar for splat lowering!");
1874 
1875   // Otherwise use the more complicated splatting algorithm.
1876   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1877 }
1878 
1879 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1880                                    const RISCVSubtarget &Subtarget) {
1881   SDValue V1 = Op.getOperand(0);
1882   SDValue V2 = Op.getOperand(1);
1883   SDLoc DL(Op);
1884   MVT XLenVT = Subtarget.getXLenVT();
1885   MVT VT = Op.getSimpleValueType();
1886   unsigned NumElts = VT.getVectorNumElements();
1887   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1888 
1889   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1890 
1891   SDValue TrueMask, VL;
1892   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1893 
1894   if (SVN->isSplat()) {
1895     const int Lane = SVN->getSplatIndex();
1896     if (Lane >= 0) {
1897       MVT SVT = VT.getVectorElementType();
1898 
1899       // Turn splatted vector load into a strided load with an X0 stride.
1900       SDValue V = V1;
1901       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1902       // with undef.
1903       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
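      // For example, splatting lane 1 of a simple v4i64 load on RV32 becomes
      // an x0-strided vlse64 from the load address plus 8.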
1904       int Offset = Lane;
1905       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1906         int OpElements =
1907             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1908         V = V.getOperand(Offset / OpElements);
1909         Offset %= OpElements;
1910       }
1911 
1912       // We need to ensure the load isn't atomic or volatile.
1913       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1914         auto *Ld = cast<LoadSDNode>(V);
1915         Offset *= SVT.getStoreSize();
1916         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1917                                                    TypeSize::Fixed(Offset), DL);
1918 
1919         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1920         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1921           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1922           SDValue IntID =
1923               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1924           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1925                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1926           SDValue NewLoad = DAG.getMemIntrinsicNode(
1927               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1928               DAG.getMachineFunction().getMachineMemOperand(
1929                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1930           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1931           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1932         }
1933 
1934         // Otherwise use a scalar load and splat. This will give the best
1935         // opportunity to fold a splat into the operation. ISel can turn it into
1936         // the x0 strided load if we aren't able to fold away the select.
1937         if (SVT.isFloatingPoint())
1938           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1939                           Ld->getPointerInfo().getWithOffset(Offset),
1940                           Ld->getOriginalAlign(),
1941                           Ld->getMemOperand()->getFlags());
1942         else
1943           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1944                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1945                              Ld->getOriginalAlign(),
1946                              Ld->getMemOperand()->getFlags());
1947         DAG.makeEquivalentMemoryOrdering(Ld, V);
1948 
1949         unsigned Opc =
1950             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1951         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1952         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1953       }
1954 
1955       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1956       assert(Lane < (int)NumElts && "Unexpected lane!");
1957       SDValue Gather =
1958           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1959                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1960       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1961     }
1962   }
1963 
  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
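  // For example, with 4 elements the mask <0, 5, 2, 7> is such a select (each
  // index i is either i or i + 4), whereas <1, 5, 2, 7> is not.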
1967   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1968     int MaskIndex = MaskIdx.value();
1969     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1970   });
1971 
1972   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1973 
1974   SmallVector<SDValue> MaskVals;
1975   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1976   // merged with a second vrgather.
1977   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1978 
1979   // By default we preserve the original operand order, and use a mask to
1980   // select LHS as true and RHS as false. However, since RVV vector selects may
1981   // feature splats but only on the LHS, we may choose to invert our mask and
1982   // instead select between RHS and LHS.
1983   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1984   bool InvertMask = IsSelect == SwapOps;
1985 
  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
1987   // half.
1988   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
1989 
1990   // Now construct the mask that will be used by the vselect or blended
1991   // vrgather operation. For vrgathers, construct the appropriate indices into
1992   // each vector.
1993   for (int MaskIndex : SVN->getMask()) {
1994     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1995     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1996     if (!IsSelect) {
1997       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
1998       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
1999                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2000                                      : DAG.getUNDEF(XLenVT));
2001       GatherIndicesRHS.push_back(
2002           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2003                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2004       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2005         ++LHSIndexCounts[MaskIndex];
2006       if (!IsLHSOrUndefIndex)
2007         ++RHSIndexCounts[MaskIndex - NumElts];
2008     }
2009   }
2010 
2011   if (SwapOps) {
2012     std::swap(V1, V2);
2013     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2014   }
2015 
2016   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2017   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2018   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2019 
2020   if (IsSelect)
2021     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2022 
2023   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2024     // On such a large vector we're unable to use i8 as the index type.
2025     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2026     // may involve vector splitting if we're already at LMUL=8, or our
2027     // user-supplied maximum fixed-length LMUL.
2028     return SDValue();
2029   }
2030 
2031   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2032   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2033   MVT IndexVT = VT.changeTypeToInteger();
2034   // Since we can't introduce illegal index types at this stage, use i16 and
2035   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2036   // than XLenVT.
2037   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2038     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2039     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2040   }
2041 
2042   MVT IndexContainerVT =
2043       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2044 
2045   SDValue Gather;
2046   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2047   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2048   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2049     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2050   } else {
2051     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2052     // If only one index is used, we can use a "splat" vrgather.
2053     // TODO: We can splat the most-common index and fix-up any stragglers, if
2054     // that's beneficial.
2055     if (LHSIndexCounts.size() == 1) {
2056       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2057       Gather =
2058           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2059                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2060     } else {
2061       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2062       LHSIndices =
2063           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2064 
2065       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2066                            TrueMask, VL);
2067     }
2068   }
2069 
2070   // If a second vector operand is used by this shuffle, blend it in with an
2071   // additional vrgather.
2072   if (!V2.isUndef()) {
2073     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2074     // If only one index is used, we can use a "splat" vrgather.
2075     // TODO: We can splat the most-common index and fix-up any stragglers, if
2076     // that's beneficial.
2077     if (RHSIndexCounts.size() == 1) {
2078       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2079       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2080                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2081     } else {
2082       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2083       RHSIndices =
2084           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2085       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2086                        VL);
2087     }
2088 
2089     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2090     SelectMask =
2091         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2092 
2093     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2094                          Gather, VL);
2095   }
2096 
2097   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2098 }
2099 
2100 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2101                                      SDLoc DL, SelectionDAG &DAG,
2102                                      const RISCVSubtarget &Subtarget) {
2103   if (VT.isScalableVector())
2104     return DAG.getFPExtendOrRound(Op, DL, VT);
2105   assert(VT.isFixedLengthVector() &&
2106          "Unexpected value type for RVV FP extend/round lowering");
2107   SDValue Mask, VL;
2108   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2109   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2110                         ? RISCVISD::FP_EXTEND_VL
2111                         : RISCVISD::FP_ROUND_VL;
2112   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2113 }
2114 
// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly aligned, it returns SDValue().
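// For example, an underaligned v8i16 load is performed as a v16i8 load whose
// result is bitcast back to v8i16.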
2119 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2120                                                     SelectionDAG &DAG) const {
2121   auto *Load = cast<LoadSDNode>(Op);
2122   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2123 
2124   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2125                                      Load->getMemoryVT(),
2126                                      *Load->getMemOperand()))
2127     return SDValue();
2128 
2129   SDLoc DL(Op);
2130   MVT VT = Op.getSimpleValueType();
2131   unsigned EltSizeBits = VT.getScalarSizeInBits();
2132   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2133          "Unexpected unaligned RVV load type");
2134   MVT NewVT =
2135       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2136   assert(NewVT.isValid() &&
2137          "Expecting equally-sized RVV vector types to be legal");
2138   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2139                           Load->getPointerInfo(), Load->getOriginalAlign(),
2140                           Load->getMemOperand()->getFlags());
2141   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2142 }
2143 
// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
2148 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2149                                                      SelectionDAG &DAG) const {
2150   auto *Store = cast<StoreSDNode>(Op);
2151   assert(Store && Store->getValue().getValueType().isVector() &&
2152          "Expected vector store");
2153 
2154   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2155                                      Store->getMemoryVT(),
2156                                      *Store->getMemOperand()))
2157     return SDValue();
2158 
2159   SDLoc DL(Op);
2160   SDValue StoredVal = Store->getValue();
2161   MVT VT = StoredVal.getSimpleValueType();
2162   unsigned EltSizeBits = VT.getScalarSizeInBits();
2163   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2164          "Unexpected unaligned RVV store type");
2165   MVT NewVT =
2166       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2167   assert(NewVT.isValid() &&
2168          "Expecting equally-sized RVV vector types to be legal");
2169   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2170   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2171                       Store->getPointerInfo(), Store->getOriginalAlign(),
2172                       Store->getMemOperand()->getFlags());
2173 }
2174 
2175 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2176                                             SelectionDAG &DAG) const {
2177   switch (Op.getOpcode()) {
2178   default:
2179     report_fatal_error("unimplemented operand");
2180   case ISD::GlobalAddress:
2181     return lowerGlobalAddress(Op, DAG);
2182   case ISD::BlockAddress:
2183     return lowerBlockAddress(Op, DAG);
2184   case ISD::ConstantPool:
2185     return lowerConstantPool(Op, DAG);
2186   case ISD::JumpTable:
2187     return lowerJumpTable(Op, DAG);
2188   case ISD::GlobalTLSAddress:
2189     return lowerGlobalTLSAddress(Op, DAG);
2190   case ISD::SELECT:
2191     return lowerSELECT(Op, DAG);
2192   case ISD::BRCOND:
2193     return lowerBRCOND(Op, DAG);
2194   case ISD::VASTART:
2195     return lowerVASTART(Op, DAG);
2196   case ISD::FRAMEADDR:
2197     return lowerFRAMEADDR(Op, DAG);
2198   case ISD::RETURNADDR:
2199     return lowerRETURNADDR(Op, DAG);
2200   case ISD::SHL_PARTS:
2201     return lowerShiftLeftParts(Op, DAG);
2202   case ISD::SRA_PARTS:
2203     return lowerShiftRightParts(Op, DAG, true);
2204   case ISD::SRL_PARTS:
2205     return lowerShiftRightParts(Op, DAG, false);
2206   case ISD::BITCAST: {
2207     SDLoc DL(Op);
2208     EVT VT = Op.getValueType();
2209     SDValue Op0 = Op.getOperand(0);
2210     EVT Op0VT = Op0.getValueType();
2211     MVT XLenVT = Subtarget.getXLenVT();
2212     if (VT.isFixedLengthVector()) {
2213       // We can handle fixed length vector bitcasts with a simple replacement
2214       // in isel.
2215       if (Op0VT.isFixedLengthVector())
2216         return Op;
2217       // When bitcasting from scalar to fixed-length vector, insert the scalar
2218       // into a one-element vector of the result type, and perform a vector
2219       // bitcast.
2220       if (!Op0VT.isVector()) {
2221         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2222         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2223                                               DAG.getUNDEF(BVT), Op0,
2224                                               DAG.getConstant(0, DL, XLenVT)));
2225       }
2226       return SDValue();
2227     }
2228     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2229     // thus: bitcast the vector to a one-element vector type whose element type
2230     // is the same as the result type, and extract the first element.
2231     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2232       LLVMContext &Context = *DAG.getContext();
2233       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2234       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2235                          DAG.getConstant(0, DL, XLenVT));
2236     }
2237     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2238       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2239       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2240       return FPConv;
2241     }
2242     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2243         Subtarget.hasStdExtF()) {
2244       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2245       SDValue FPConv =
2246           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2247       return FPConv;
2248     }
2249     return SDValue();
2250   }
2251   case ISD::INTRINSIC_WO_CHAIN:
2252     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2253   case ISD::INTRINSIC_W_CHAIN:
2254     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2255   case ISD::BSWAP:
2256   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2258     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2259     MVT VT = Op.getSimpleValueType();
2260     SDLoc DL(Op);
2261     // Start with the maximum immediate value which is the bitwidth - 1.
2262     unsigned Imm = VT.getSizeInBits() - 1;
2263     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
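    // For example, on RV64 this yields an immediate of 63 for BITREVERSE and
    // 56 for BSWAP.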
2264     if (Op.getOpcode() == ISD::BSWAP)
2265       Imm &= ~0x7U;
2266     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2267                        DAG.getConstant(Imm, DL, VT));
2268   }
2269   case ISD::FSHL:
2270   case ISD::FSHR: {
2271     MVT VT = Op.getSimpleValueType();
2272     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2273     SDLoc DL(Op);
2274     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2275       return Op;
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
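    // For example, on RV64 (fshl x, y, z) becomes (fsl x, y, (and z, 63)).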
2278     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2279     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2280                                 DAG.getConstant(ShAmtWidth, DL, VT));
2281     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2282     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2283   }
2284   case ISD::TRUNCATE: {
2285     SDLoc DL(Op);
2286     MVT VT = Op.getSimpleValueType();
2287     // Only custom-lower vector truncates
2288     if (!VT.isVector())
2289       return Op;
2290 
2291     // Truncates to mask types are handled differently
2292     if (VT.getVectorElementType() == MVT::i1)
2293       return lowerVectorMaskTrunc(Op, DAG);
2294 
2295     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2296     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2297     // truncate by one power of two at a time.
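    // For example, an i64-to-i8 element truncate is emitted as three halving
    // truncates: i64->i32->i16->i8.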
2298     MVT DstEltVT = VT.getVectorElementType();
2299 
2300     SDValue Src = Op.getOperand(0);
2301     MVT SrcVT = Src.getSimpleValueType();
2302     MVT SrcEltVT = SrcVT.getVectorElementType();
2303 
2304     assert(DstEltVT.bitsLT(SrcEltVT) &&
2305            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2306            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2307            "Unexpected vector truncate lowering");
2308 
2309     MVT ContainerVT = SrcVT;
2310     if (SrcVT.isFixedLengthVector()) {
2311       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2312       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2313     }
2314 
2315     SDValue Result = Src;
2316     SDValue Mask, VL;
2317     std::tie(Mask, VL) =
2318         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2319     LLVMContext &Context = *DAG.getContext();
2320     const ElementCount Count = ContainerVT.getVectorElementCount();
2321     do {
2322       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2323       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2324       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2325                            Mask, VL);
2326     } while (SrcEltVT != DstEltVT);
2327 
2328     if (SrcVT.isFixedLengthVector())
2329       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2330 
2331     return Result;
2332   }
2333   case ISD::ANY_EXTEND:
2334   case ISD::ZERO_EXTEND:
2335     if (Op.getOperand(0).getValueType().isVector() &&
2336         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2337       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2338     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2339   case ISD::SIGN_EXTEND:
2340     if (Op.getOperand(0).getValueType().isVector() &&
2341         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2342       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2343     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2344   case ISD::SPLAT_VECTOR_PARTS:
2345     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2346   case ISD::INSERT_VECTOR_ELT:
2347     return lowerINSERT_VECTOR_ELT(Op, DAG);
2348   case ISD::EXTRACT_VECTOR_ELT:
2349     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2350   case ISD::VSCALE: {
2351     MVT VT = Op.getSimpleValueType();
2352     SDLoc DL(Op);
2353     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for LMUL=1 to use a 64-bit known
    // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we
    // calculate vscale as VLENB / 8.
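    // For example, VSCALE(16) lowers to (VLENB << 1) and VSCALE(24) lowers to
    // (VLENB * 3).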
2357     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2358     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2359       // We assume VLENB is a multiple of 8. We manually choose the best shift
2360       // here because SimplifyDemandedBits isn't always able to simplify it.
2361       uint64_t Val = Op.getConstantOperandVal(0);
2362       if (isPowerOf2_64(Val)) {
2363         uint64_t Log2 = Log2_64(Val);
2364         if (Log2 < 3)
2365           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2366                              DAG.getConstant(3 - Log2, DL, VT));
2367         if (Log2 > 3)
2368           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2369                              DAG.getConstant(Log2 - 3, DL, VT));
2370         return VLENB;
2371       }
2372       // If the multiplier is a multiple of 8, scale it down to avoid needing
2373       // to shift the VLENB value.
2374       if ((Val % 8) == 0)
2375         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2376                            DAG.getConstant(Val / 8, DL, VT));
2377     }
2378 
2379     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2380                                  DAG.getConstant(3, DL, VT));
2381     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2382   }
2383   case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
2385     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2386     // via f32.
2387     SDLoc DL(Op);
2388     MVT VT = Op.getSimpleValueType();
2389     SDValue Src = Op.getOperand(0);
2390     MVT SrcVT = Src.getSimpleValueType();
2391 
2392     // Prepare any fixed-length vector operands.
2393     MVT ContainerVT = VT;
2394     if (SrcVT.isFixedLengthVector()) {
2395       ContainerVT = getContainerForFixedLengthVector(VT);
2396       MVT SrcContainerVT =
2397           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2398       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2399     }
2400 
2401     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2402         SrcVT.getVectorElementType() != MVT::f16) {
2403       // For scalable vectors, we only need to close the gap between
2404       // vXf16->vXf64.
2405       if (!VT.isFixedLengthVector())
2406         return Op;
2407       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2408       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2409       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2410     }
2411 
2412     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2413     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2414     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2415         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2416 
2417     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2418                                            DL, DAG, Subtarget);
2419     if (VT.isFixedLengthVector())
2420       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2421     return Extend;
2422   }
2423   case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
2425     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2426     // conversion instruction.
2427     SDLoc DL(Op);
2428     MVT VT = Op.getSimpleValueType();
2429     SDValue Src = Op.getOperand(0);
2430     MVT SrcVT = Src.getSimpleValueType();
2431 
2432     // Prepare any fixed-length vector operands.
2433     MVT ContainerVT = VT;
2434     if (VT.isFixedLengthVector()) {
2435       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2436       ContainerVT =
2437           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2438       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2439     }
2440 
2441     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2442         SrcVT.getVectorElementType() != MVT::f64) {
2443       // For scalable vectors, we only need to close the gap between
2444       // vXf64<->vXf16.
2445       if (!VT.isFixedLengthVector())
2446         return Op;
2447       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2448       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2449       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2450     }
2451 
2452     SDValue Mask, VL;
2453     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2454 
2455     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2456     SDValue IntermediateRound =
2457         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2458     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2459                                           DL, DAG, Subtarget);
2460 
2461     if (VT.isFixedLengthVector())
2462       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2463     return Round;
2464   }
2465   case ISD::FP_TO_SINT:
2466   case ISD::FP_TO_UINT:
2467   case ISD::SINT_TO_FP:
2468   case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that do two hops into
2471     // sequences.
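    // For example, v4i8->v4f32 is lowered as an extend to v4i32 followed by a
    // conversion, and v4f32->v4i8 as a conversion to v4i16 followed by a
    // truncate.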
2472     MVT VT = Op.getSimpleValueType();
2473     if (!VT.isVector())
2474       return Op;
2475     SDLoc DL(Op);
2476     SDValue Src = Op.getOperand(0);
2477     MVT EltVT = VT.getVectorElementType();
2478     MVT SrcVT = Src.getSimpleValueType();
2479     MVT SrcEltVT = SrcVT.getVectorElementType();
2480     unsigned EltSize = EltVT.getSizeInBits();
2481     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2482     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2483            "Unexpected vector element types");
2484 
2485     bool IsInt2FP = SrcEltVT.isInteger();
2486     // Widening conversions
2487     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2488       if (IsInt2FP) {
2489         // Do a regular integer sign/zero extension then convert to float.
2490         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2491                                       VT.getVectorElementCount());
2492         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2493                                  ? ISD::ZERO_EXTEND
2494                                  : ISD::SIGN_EXTEND;
2495         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2496         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2497       }
2498       // FP2Int
2499       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2500       // Do one doubling fp_extend then complete the operation by converting
2501       // to int.
2502       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2503       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2504       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2505     }
2506 
2507     // Narrowing conversions
2508     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2509       if (IsInt2FP) {
2510         // One narrowing int_to_fp, then an fp_round.
        assert(EltVT == MVT::f16 && "Unexpected [US]INT_TO_FP lowering");
2512         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2513         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2514         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2515       }
2516       // FP2Int
2517       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2518       // representable by the integer, the result is poison.
2519       MVT IVecVT =
2520           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2521                            VT.getVectorElementCount());
2522       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2523       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2524     }
2525 
    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions as well as the halving/doubling ones.
2528     if (!VT.isFixedLengthVector())
2529       return Op;
2530 
2531     // For fixed-length vectors we lower to a custom "VL" node.
2532     unsigned RVVOpc = 0;
2533     switch (Op.getOpcode()) {
2534     default:
2535       llvm_unreachable("Impossible opcode");
2536     case ISD::FP_TO_SINT:
2537       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2538       break;
2539     case ISD::FP_TO_UINT:
2540       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2541       break;
2542     case ISD::SINT_TO_FP:
2543       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2544       break;
2545     case ISD::UINT_TO_FP:
2546       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2547       break;
2548     }
2549 
2550     MVT ContainerVT, SrcContainerVT;
2551     // Derive the reference container type from the larger vector type.
2552     if (SrcEltSize > EltSize) {
2553       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2554       ContainerVT =
2555           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2556     } else {
2557       ContainerVT = getContainerForFixedLengthVector(VT);
2558       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2559     }
2560 
2561     SDValue Mask, VL;
2562     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2563 
2564     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2565     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2566     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2567   }
2568   case ISD::FP_TO_SINT_SAT:
2569   case ISD::FP_TO_UINT_SAT:
2570     return lowerFP_TO_INT_SAT(Op, DAG);
2571   case ISD::VECREDUCE_ADD:
2572   case ISD::VECREDUCE_UMAX:
2573   case ISD::VECREDUCE_SMAX:
2574   case ISD::VECREDUCE_UMIN:
2575   case ISD::VECREDUCE_SMIN:
2576     return lowerVECREDUCE(Op, DAG);
2577   case ISD::VECREDUCE_AND:
2578   case ISD::VECREDUCE_OR:
2579   case ISD::VECREDUCE_XOR:
2580     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2581       return lowerVectorMaskVECREDUCE(Op, DAG);
2582     return lowerVECREDUCE(Op, DAG);
2583   case ISD::VECREDUCE_FADD:
2584   case ISD::VECREDUCE_SEQ_FADD:
2585   case ISD::VECREDUCE_FMIN:
2586   case ISD::VECREDUCE_FMAX:
2587     return lowerFPVECREDUCE(Op, DAG);
2588   case ISD::INSERT_SUBVECTOR:
2589     return lowerINSERT_SUBVECTOR(Op, DAG);
2590   case ISD::EXTRACT_SUBVECTOR:
2591     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2592   case ISD::STEP_VECTOR:
2593     return lowerSTEP_VECTOR(Op, DAG);
2594   case ISD::VECTOR_REVERSE:
2595     return lowerVECTOR_REVERSE(Op, DAG);
2596   case ISD::BUILD_VECTOR:
2597     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2598   case ISD::SPLAT_VECTOR:
2599     if (Op.getValueType().getVectorElementType() == MVT::i1)
2600       return lowerVectorMaskSplat(Op, DAG);
2601     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2602   case ISD::VECTOR_SHUFFLE:
2603     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2604   case ISD::CONCAT_VECTORS: {
2605     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2606     // better than going through the stack, as the default expansion does.
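    // For example, (concat_vectors v4i32:a, v4i32:b) becomes
    // (insert_subvector (insert_subvector undef, a, 0), b, 4).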
2607     SDLoc DL(Op);
2608     MVT VT = Op.getSimpleValueType();
2609     unsigned NumOpElts =
2610         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2611     SDValue Vec = DAG.getUNDEF(VT);
2612     for (const auto &OpIdx : enumerate(Op->ops()))
2613       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2614                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2615     return Vec;
2616   }
2617   case ISD::LOAD:
2618     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2619       return V;
2620     if (Op.getValueType().isFixedLengthVector())
2621       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2622     return Op;
2623   case ISD::STORE:
2624     if (auto V = expandUnalignedRVVStore(Op, DAG))
2625       return V;
2626     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2627       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2628     return Op;
2629   case ISD::MLOAD:
2630     return lowerMLOAD(Op, DAG);
2631   case ISD::MSTORE:
2632     return lowerMSTORE(Op, DAG);
2633   case ISD::SETCC:
2634     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2635   case ISD::ADD:
2636     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2637   case ISD::SUB:
2638     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2639   case ISD::MUL:
2640     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2641   case ISD::MULHS:
2642     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2643   case ISD::MULHU:
2644     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2645   case ISD::AND:
2646     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2647                                               RISCVISD::AND_VL);
2648   case ISD::OR:
2649     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2650                                               RISCVISD::OR_VL);
2651   case ISD::XOR:
2652     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2653                                               RISCVISD::XOR_VL);
2654   case ISD::SDIV:
2655     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2656   case ISD::SREM:
2657     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2658   case ISD::UDIV:
2659     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2660   case ISD::UREM:
2661     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2662   case ISD::SHL:
2663   case ISD::SRA:
2664   case ISD::SRL:
2665     if (Op.getSimpleValueType().isFixedLengthVector())
2666       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2667     // This can be called for an i32 shift amount that needs to be promoted.
2668     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2669            "Unexpected custom legalisation");
2670     return SDValue();
2671   case ISD::SADDSAT:
2672     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
2673   case ISD::UADDSAT:
2674     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
2675   case ISD::SSUBSAT:
2676     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
2677   case ISD::USUBSAT:
2678     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
2679   case ISD::FADD:
2680     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2681   case ISD::FSUB:
2682     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2683   case ISD::FMUL:
2684     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2685   case ISD::FDIV:
2686     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2687   case ISD::FNEG:
2688     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2689   case ISD::FABS:
2690     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2691   case ISD::FSQRT:
2692     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2693   case ISD::FMA:
2694     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2695   case ISD::SMIN:
2696     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2697   case ISD::SMAX:
2698     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2699   case ISD::UMIN:
2700     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2701   case ISD::UMAX:
2702     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2703   case ISD::FMINNUM:
2704     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2705   case ISD::FMAXNUM:
2706     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2707   case ISD::ABS:
2708     return lowerABS(Op, DAG);
2709   case ISD::VSELECT:
2710     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2711   case ISD::FCOPYSIGN:
2712     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2713   case ISD::MGATHER:
2714     return lowerMGATHER(Op, DAG);
2715   case ISD::MSCATTER:
2716     return lowerMSCATTER(Op, DAG);
2717   case ISD::FLT_ROUNDS_:
2718     return lowerGET_ROUNDING(Op, DAG);
2719   case ISD::SET_ROUNDING:
2720     return lowerSET_ROUNDING(Op, DAG);
2721   case ISD::VP_ADD:
2722     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2723   case ISD::VP_SUB:
2724     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2725   case ISD::VP_MUL:
2726     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2727   case ISD::VP_SDIV:
2728     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2729   case ISD::VP_UDIV:
2730     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2731   case ISD::VP_SREM:
2732     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2733   case ISD::VP_UREM:
2734     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2735   case ISD::VP_AND:
2736     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2737   case ISD::VP_OR:
2738     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2739   case ISD::VP_XOR:
2740     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2741   case ISD::VP_ASHR:
2742     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2743   case ISD::VP_LSHR:
2744     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2745   case ISD::VP_SHL:
2746     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2747   case ISD::VP_FADD:
2748     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2749   case ISD::VP_FSUB:
2750     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2751   case ISD::VP_FMUL:
2752     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2753   case ISD::VP_FDIV:
2754     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2755   }
2756 }
2757 
2758 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2759                              SelectionDAG &DAG, unsigned Flags) {
2760   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2761 }
2762 
2763 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2764                              SelectionDAG &DAG, unsigned Flags) {
2765   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2766                                    Flags);
2767 }
2768 
2769 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2770                              SelectionDAG &DAG, unsigned Flags) {
2771   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2772                                    N->getOffset(), Flags);
2773 }
2774 
2775 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2776                              SelectionDAG &DAG, unsigned Flags) {
2777   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2778 }
2779 
2780 template <class NodeTy>
2781 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2782                                      bool IsLocal) const {
2783   SDLoc DL(N);
2784   EVT Ty = getPointerTy(DAG.getDataLayout());
2785 
2786   if (isPositionIndependent()) {
2787     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2788     if (IsLocal)
2789       // Use PC-relative addressing to access the symbol. This generates the
2790       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2791       // %pcrel_lo(auipc)).
2792       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2793 
2794     // Use PC-relative addressing to access the GOT for this symbol, then load
2795     // the address from the GOT. This generates the pattern (PseudoLA sym),
2796     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2797     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2798   }
2799 
2800   switch (getTargetMachine().getCodeModel()) {
2801   default:
2802     report_fatal_error("Unsupported code model for lowering");
2803   case CodeModel::Small: {
2804     // Generate a sequence for accessing addresses within the first 2 GiB of
2805     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
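    // In assembly this is, e.g.:
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)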
2806     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2807     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2808     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2809     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2810   }
2811   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range of
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
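    // In assembly this is, e.g.:
    //   .Lpcrel_hi0:
    //     auipc a0, %pcrel_hi(sym)
    //     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)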
2815     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2816     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2817   }
2818   }
2819 }
2820 
2821 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2822                                                 SelectionDAG &DAG) const {
2823   SDLoc DL(Op);
2824   EVT Ty = Op.getValueType();
2825   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2826   int64_t Offset = N->getOffset();
2827   MVT XLenVT = Subtarget.getXLenVT();
2828 
2829   const GlobalValue *GV = N->getGlobal();
2830   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2831   SDValue Addr = getAddr(N, DAG, IsLocal);
2832 
2833   // In order to maximise the opportunity for common subexpression elimination,
2834   // emit a separate ADD node for the global address offset instead of folding
2835   // it in the global address node. Later peephole optimisations may choose to
2836   // fold it back in when profitable.
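  // For example, accesses to (gv + 4) and (gv + 8) can then share a single
  // materialisation of gv's address.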
2837   if (Offset != 0)
2838     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2839                        DAG.getConstant(Offset, DL, XLenVT));
2840   return Addr;
2841 }
2842 
2843 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2844                                                SelectionDAG &DAG) const {
2845   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2846 
2847   return getAddr(N, DAG);
2848 }
2849 
2850 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2851                                                SelectionDAG &DAG) const {
2852   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2853 
2854   return getAddr(N, DAG);
2855 }
2856 
2857 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2858                                             SelectionDAG &DAG) const {
2859   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2860 
2861   return getAddr(N, DAG);
2862 }
2863 
2864 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2865                                               SelectionDAG &DAG,
2866                                               bool UseGOT) const {
2867   SDLoc DL(N);
2868   EVT Ty = getPointerTy(DAG.getDataLayout());
2869   const GlobalValue *GV = N->getGlobal();
2870   MVT XLenVT = Subtarget.getXLenVT();
2871 
2872   if (UseGOT) {
2873     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2874     // load the address from the GOT and add the thread pointer. This generates
2875     // the pattern (PseudoLA_TLS_IE sym), which expands to
2876     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2877     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2878     SDValue Load =
2879         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2880 
2881     // Add the thread pointer.
2882     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2883     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2884   }
2885 
2886   // Generate a sequence for accessing the address relative to the thread
2887   // pointer, with the appropriate adjustment for the thread pointer offset.
2888   // This generates the pattern
2889   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
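  // In assembly this is, e.g.:
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)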
2890   SDValue AddrHi =
2891       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2892   SDValue AddrAdd =
2893       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2894   SDValue AddrLo =
2895       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2896 
2897   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2898   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2899   SDValue MNAdd = SDValue(
2900       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2901       0);
2902   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2903 }
2904 
2905 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2906                                                SelectionDAG &DAG) const {
2907   SDLoc DL(N);
2908   EVT Ty = getPointerTy(DAG.getDataLayout());
2909   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2910   const GlobalValue *GV = N->getGlobal();
2911 
2912   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2913   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2914   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2915   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2916   SDValue Load =
2917       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2918 
2919   // Prepare argument list to generate call.
2920   ArgListTy Args;
2921   ArgListEntry Entry;
2922   Entry.Node = Load;
2923   Entry.Ty = CallTy;
2924   Args.push_back(Entry);
2925 
  // Set up the call to __tls_get_addr.
2927   TargetLowering::CallLoweringInfo CLI(DAG);
2928   CLI.setDebugLoc(DL)
2929       .setChain(DAG.getEntryNode())
2930       .setLibCallee(CallingConv::C, CallTy,
2931                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2932                     std::move(Args));
2933 
2934   return LowerCallTo(CLI).first;
2935 }
2936 
2937 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2938                                                    SelectionDAG &DAG) const {
2939   SDLoc DL(Op);
2940   EVT Ty = Op.getValueType();
2941   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2942   int64_t Offset = N->getOffset();
2943   MVT XLenVT = Subtarget.getXLenVT();
2944 
2945   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2946 
2947   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2948       CallingConv::GHC)
    report_fatal_error("TLS is not supported in the GHC calling convention");
2950 
2951   SDValue Addr;
2952   switch (Model) {
2953   case TLSModel::LocalExec:
2954     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2955     break;
2956   case TLSModel::InitialExec:
2957     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2958     break;
2959   case TLSModel::LocalDynamic:
2960   case TLSModel::GeneralDynamic:
2961     Addr = getDynamicTLSAddr(N, DAG);
2962     break;
2963   }
2964 
2965   // In order to maximise the opportunity for common subexpression elimination,
2966   // emit a separate ADD node for the global address offset instead of folding
2967   // it in the global address node. Later peephole optimisations may choose to
2968   // fold it back in when profitable.
2969   if (Offset != 0)
2970     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2971                        DAG.getConstant(Offset, DL, XLenVT));
2972   return Addr;
2973 }
2974 
2975 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2976   SDValue CondV = Op.getOperand(0);
2977   SDValue TrueV = Op.getOperand(1);
2978   SDValue FalseV = Op.getOperand(2);
2979   SDLoc DL(Op);
2980   MVT VT = Op.getSimpleValueType();
2981   MVT XLenVT = Subtarget.getXLenVT();
2982 
2983   // Lower vector SELECTs to VSELECTs by splatting the condition.
2984   if (VT.isVector()) {
2985     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2986     SDValue CondSplat = VT.isScalableVector()
2987                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2988                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2989     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2990   }
2991 
2992   // If the result type is XLenVT and CondV is the output of a SETCC node
2993   // which also operated on XLenVT inputs, then merge the SETCC node into the
2994   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2995   // compare+branch instructions. i.e.:
2996   // (select (setcc lhs, rhs, cc), truev, falsev)
2997   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2998   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2999       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3000     SDValue LHS = CondV.getOperand(0);
3001     SDValue RHS = CondV.getOperand(1);
3002     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3003     ISD::CondCode CCVal = CC->get();
3004 
    // Special case for a select of two constants that differ by 1. Normally
    // this is done by DAGCombine, but if the select is introduced by type
    // legalization or op legalization, we miss it. Restrict to the SETLT
    // case for now because that is what signed saturating add/sub need.
3009     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3010     // but we would probably want to swap the true/false values if the condition
3011     // is SETGE/SETLE to avoid an XORI.
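    // For example, (select (setlt a, b), 4, 3) becomes
    // (add (setlt a, b), 3), and (select (setlt a, b), 3, 4) becomes
    // (sub 4, (setlt a, b)).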
3012     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3013         CCVal == ISD::SETLT) {
3014       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3015       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3016       if (TrueVal - 1 == FalseVal)
3017         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3018       if (TrueVal + 1 == FalseVal)
3019         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3020     }
3021 
3022     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3023 
3024     SDValue TargetCC = DAG.getCondCode(CCVal);
3025     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3026     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3027   }
3028 
3029   // Otherwise:
3030   // (select condv, truev, falsev)
3031   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3032   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3033   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3034 
3035   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3036 
3037   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3038 }
3039 
3040 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3041   SDValue CondV = Op.getOperand(1);
3042   SDLoc DL(Op);
3043   MVT XLenVT = Subtarget.getXLenVT();
3044 
3045   if (CondV.getOpcode() == ISD::SETCC &&
3046       CondV.getOperand(0).getValueType() == XLenVT) {
3047     SDValue LHS = CondV.getOperand(0);
3048     SDValue RHS = CondV.getOperand(1);
3049     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3050 
3051     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3052 
3053     SDValue TargetCC = DAG.getCondCode(CCVal);
3054     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3055                        LHS, RHS, TargetCC, Op.getOperand(2));
3056   }
3057 
3058   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3059                      CondV, DAG.getConstant(0, DL, XLenVT),
3060                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3061 }
3062 
3063 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3064   MachineFunction &MF = DAG.getMachineFunction();
3065   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3066 
3067   SDLoc DL(Op);
3068   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3069                                  getPointerTy(MF.getDataLayout()));
3070 
3071   // vastart just stores the address of the VarArgsFrameIndex slot into the
3072   // memory location argument.
3073   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3074   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3075                       MachinePointerInfo(SV));
3076 }
3077 
3078 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3079                                             SelectionDAG &DAG) const {
3080   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3081   MachineFunction &MF = DAG.getMachineFunction();
3082   MachineFrameInfo &MFI = MF.getFrameInfo();
3083   MFI.setFrameAddressIsTaken(true);
3084   Register FrameReg = RI.getFrameRegister(MF);
3085   int XLenInBytes = Subtarget.getXLen() / 8;
3086 
3087   EVT VT = Op.getValueType();
3088   SDLoc DL(Op);
3089   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3090   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3091   while (Depth--) {
3092     int Offset = -(XLenInBytes * 2);
3093     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3094                               DAG.getIntPtrConstant(Offset, DL));
3095     FrameAddr =
3096         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3097   }
3098   return FrameAddr;
3099 }
3100 
3101 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3102                                              SelectionDAG &DAG) const {
3103   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3104   MachineFunction &MF = DAG.getMachineFunction();
3105   MachineFrameInfo &MFI = MF.getFrameInfo();
3106   MFI.setReturnAddressIsTaken(true);
3107   MVT XLenVT = Subtarget.getXLenVT();
3108   int XLenInBytes = Subtarget.getXLen() / 8;
3109 
3110   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3111     return SDValue();
3112 
3113   EVT VT = Op.getValueType();
3114   SDLoc DL(Op);
3115   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3116   if (Depth) {
3117     int Off = -XLenInBytes;
3118     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3119     SDValue Offset = DAG.getConstant(Off, DL, VT);
3120     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3121                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3122                        MachinePointerInfo());
3123   }
3124 
3125   // Return the value of the return address register, marking it an implicit
3126   // live-in.
3127   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3128   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3129 }
3130 
3131 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3132                                                  SelectionDAG &DAG) const {
3133   SDLoc DL(Op);
3134   SDValue Lo = Op.getOperand(0);
3135   SDValue Hi = Op.getOperand(1);
3136   SDValue Shamt = Op.getOperand(2);
3137   EVT VT = Lo.getValueType();
3138 
3139   // if Shamt-XLEN < 0: // Shamt < XLEN
3140   //   Lo = Lo << Shamt
3141   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Hi = Lo << (Shamt-XLEN)
  //   Lo = 0
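  //
  // The Lo contribution to Hi is computed as ((Lo >>u 1) >>u (XLEN-1 -
  // Shamt)) rather than (Lo >>u (XLEN - Shamt)) so that both shift amounts
  // stay within [0, XLEN-1]; the latter would shift by XLEN when Shamt is 0,
  // which is undefined.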
3145 
3146   SDValue Zero = DAG.getConstant(0, DL, VT);
3147   SDValue One = DAG.getConstant(1, DL, VT);
3148   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3149   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3150   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3151   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3152 
3153   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3154   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3155   SDValue ShiftRightLo =
3156       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3157   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3158   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3159   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3160 
3161   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3162 
3163   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3164   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3165 
3166   SDValue Parts[2] = {Lo, Hi};
3167   return DAG.getMergeValues(Parts, DL);
3168 }
3169 
3170 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3171                                                   bool IsSRA) const {
3172   SDLoc DL(Op);
3173   SDValue Lo = Op.getOperand(0);
3174   SDValue Hi = Op.getOperand(1);
3175   SDValue Shamt = Op.getOperand(2);
3176   EVT VT = Lo.getValueType();
3177 
3178   // SRA expansion:
3179   //   if Shamt-XLEN < 0: // Shamt < XLEN
3180   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3181   //     Hi = Hi >>s Shamt
3182   //   else:
  //     Lo = Hi >>s (Shamt-XLEN)
3184   //     Hi = Hi >>s (XLEN-1)
3185   //
3186   // SRL expansion:
3187   //   if Shamt-XLEN < 0: // Shamt < XLEN
3188   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3189   //     Hi = Hi >>u Shamt
3190   //   else:
  //     Lo = Hi >>u (Shamt-XLEN)
  //     Hi = 0
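  //
  // For example, with XLEN=32 and Shamt=8 the SRL case computes
  //   Lo = (Lo >>u 8) | (Hi << 24)
  //   Hi = Hi >>u 8
  // where (Hi << 24) is formed as ((Hi << 1) << 23). As in
  // lowerShiftLeftParts, the split keeps both shift amounts within
  // [0, XLEN-1] even when Shamt is 0.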
3193 
3194   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3195 
3196   SDValue Zero = DAG.getConstant(0, DL, VT);
3197   SDValue One = DAG.getConstant(1, DL, VT);
3198   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3199   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3200   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3201   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3202 
3203   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3204   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3205   SDValue ShiftLeftHi =
3206       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3207   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3208   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3209   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3210   SDValue HiFalse =
3211       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3212 
3213   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3214 
3215   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3216   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3217 
3218   SDValue Parts[2] = {Lo, Hi};
3219   return DAG.getMergeValues(Parts, DL);
3220 }
3221 
3222 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3223 // legal equivalently-sized i8 type, so we can use that as a go-between.
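// A non-constant splat (splat_vector i1 x) thus becomes
// (setcc (splat_vector i8 (and x, 1)), (splat_vector i8 0), setne), while
// constant all-ones/all-zeros splats map directly to vmset.m/vmclr.m.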
3224 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3225                                                   SelectionDAG &DAG) const {
3226   SDLoc DL(Op);
3227   MVT VT = Op.getSimpleValueType();
3228   SDValue SplatVal = Op.getOperand(0);
3229   // All-zeros or all-ones splats are handled specially.
3230   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3231     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3232     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3233   }
3234   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3235     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3236     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3237   }
3238   MVT XLenVT = Subtarget.getXLenVT();
3239   assert(SplatVal.getValueType() == XLenVT &&
3240          "Unexpected type for i1 splat value");
3241   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3242   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3243                          DAG.getConstant(1, DL, XLenVT));
3244   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3245   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3246   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3247 }
3248 
// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type
// is illegal (currently only vXi64 on RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to SPLAT_VECTOR_I64.
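// For example, on RV32 a constant splat with Lo = 0x87654321 and Hi = -1
// satisfies (LoC >> 31) == HiC, so a single SPLAT_VECTOR_I64 of Lo (which
// splats the sign-extension of Lo) yields 0xFFFFFFFF87654321 in every
// element.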
3253 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3254                                                      SelectionDAG &DAG) const {
3255   SDLoc DL(Op);
3256   MVT VecVT = Op.getSimpleValueType();
3257   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3258          "Unexpected SPLAT_VECTOR_PARTS lowering");
3259 
3260   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3261   SDValue Lo = Op.getOperand(0);
3262   SDValue Hi = Op.getOperand(1);
3263 
3264   if (VecVT.isFixedLengthVector()) {
3265     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3267     SDValue Mask, VL;
3268     std::tie(Mask, VL) =
3269         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3270 
3271     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3272     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3273   }
3274 
3275   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3276     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3277     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi half is just the sign-extension of Lo (every bit of Hi
    // equals Lo's sign bit), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
3280     if ((LoC >> 31) == HiC)
3281       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3282   }
3283 
3284   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3285   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3286       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3287       Hi.getConstantOperandVal(1) == 31)
3288     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3289 
  // Fall back to a stack store and stride-x0 vector load. Use X0 as the VL,
  // i.e. VLMAX.
3291   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3292                      DAG.getRegister(RISCV::X0, MVT::i64));
3293 }
3294 
3295 // Custom-lower extensions from mask vectors by using a vselect either with 1
3296 // for zero/any-extension or -1 for sign-extension:
3297 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3298 // Note that any-extension is lowered identically to zero-extension.
3299 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3300                                                 int64_t ExtTrueVal) const {
3301   SDLoc DL(Op);
3302   MVT VecVT = Op.getSimpleValueType();
3303   SDValue Src = Op.getOperand(0);
3304   // Only custom-lower extensions from mask types
3305   assert(Src.getValueType().isVector() &&
3306          Src.getValueType().getVectorElementType() == MVT::i1);
3307 
3308   MVT XLenVT = Subtarget.getXLenVT();
3309   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3310   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3311 
3312   if (VecVT.isScalableVector()) {
    // Be careful not to introduce illegal scalar types at this stage, and be
    // careful about splatting constants: on RV32, vXi64 SPLAT_VECTOR is
    // illegal and must be expanded. Since we know that the constants are
    // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3317     bool IsRV32E64 =
3318         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3319 
3320     if (!IsRV32E64) {
3321       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3322       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3323     } else {
3324       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3325       SplatTrueVal =
3326           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3327     }
3328 
3329     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3330   }
3331 
3332   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3333   MVT I1ContainerVT =
3334       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3335 
3336   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3337 
3338   SDValue Mask, VL;
3339   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3340 
3341   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3342   SplatTrueVal =
3343       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3344   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3345                                SplatTrueVal, SplatZero, VL);
3346 
3347   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3348 }
3349 
3350 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3351     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3352   MVT ExtVT = Op.getSimpleValueType();
3353   // Only custom-lower extensions from fixed-length vector types.
3354   if (!ExtVT.isFixedLengthVector())
3355     return Op;
3356   MVT VT = Op.getOperand(0).getSimpleValueType();
3357   // Grab the canonical container type for the extended type. Infer the smaller
3358   // type from that to ensure the same number of vector elements, as we know
3359   // the LMUL will be sufficient to hold the smaller type.
3360   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3361   // Get the extended container type manually to ensure the same number of
3362   // vector elements between source and dest.
3363   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3364                                      ContainerExtVT.getVectorElementCount());
3365 
3366   SDValue Op1 =
3367       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3368 
3369   SDLoc DL(Op);
3370   SDValue Mask, VL;
3371   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3372 
3373   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3374 
3375   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3376 }
3377 
3378 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3379 // setcc operation:
3380 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3381 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3382                                                   SelectionDAG &DAG) const {
3383   SDLoc DL(Op);
3384   EVT MaskVT = Op.getValueType();
3385   // Only expect to custom-lower truncations to mask types
3386   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3387          "Unexpected type for vector mask lowering");
3388   SDValue Src = Op.getOperand(0);
3389   MVT VecVT = Src.getSimpleValueType();
3390 
3391   // If this is a fixed vector, we need to convert it to a scalable vector.
3392   MVT ContainerVT = VecVT;
3393   if (VecVT.isFixedLengthVector()) {
3394     ContainerVT = getContainerForFixedLengthVector(VecVT);
3395     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3396   }
3397 
3398   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3399   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3400 
3401   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3402   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3403 
3404   if (VecVT.isScalableVector()) {
3405     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3406     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3407   }
3408 
3409   SDValue Mask, VL;
3410   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3411 
3412   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3413   SDValue Trunc =
3414       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3415   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3416                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3417   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3418 }
3419 
3420 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3421 // first position of a vector, and that vector is slid up to the insert index.
3422 // By limiting the active vector length to index+1 and merging with the
3423 // original vector (with an undisturbed tail policy for elements >= VL), we
3424 // achieve the desired result of leaving all elements untouched except the one
3425 // at VL-1, which is replaced with the desired value.
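// For example, inserting into element 2 of a v4i32 vector: the value is
// first placed in element 0 of a temporary (vmv.s.x, or vfmv.s.f for
// floating point), then a vslideup with offset 2 and VL=3 merges it into
// the original vector, leaving elements 0, 1 and 3 untouched.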
3426 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3427                                                     SelectionDAG &DAG) const {
3428   SDLoc DL(Op);
3429   MVT VecVT = Op.getSimpleValueType();
3430   SDValue Vec = Op.getOperand(0);
3431   SDValue Val = Op.getOperand(1);
3432   SDValue Idx = Op.getOperand(2);
3433 
3434   if (VecVT.getVectorElementType() == MVT::i1) {
3435     // FIXME: For now we just promote to an i8 vector and insert into that,
3436     // but this is probably not optimal.
3437     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3438     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3439     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3440     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3441   }
3442 
3443   MVT ContainerVT = VecVT;
3444   // If the operand is a fixed-length vector, convert to a scalable one.
3445   if (VecVT.isFixedLengthVector()) {
3446     ContainerVT = getContainerForFixedLengthVector(VecVT);
3447     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3448   }
3449 
3450   MVT XLenVT = Subtarget.getXLenVT();
3451 
3452   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3453   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
  // Even i64-element vectors on RV32 can be lowered without scalar
  // legalization if the most-significant 32 bits of the value are simply the
  // sign-extension of the lower 32 bits.
3457   // TODO: We could also catch sign extensions of a 32-bit value.
3458   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3459     const auto *CVal = cast<ConstantSDNode>(Val);
3460     if (isInt<32>(CVal->getSExtValue())) {
3461       IsLegalInsert = true;
3462       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3463     }
3464   }
3465 
3466   SDValue Mask, VL;
3467   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3468 
3469   SDValue ValInVec;
3470 
3471   if (IsLegalInsert) {
3472     unsigned Opc =
3473         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3474     if (isNullConstant(Idx)) {
3475       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3476       if (!VecVT.isFixedLengthVector())
3477         return Vec;
3478       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3479     }
3480     ValInVec =
3481         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3482   } else {
3483     // On RV32, i64-element vectors must be specially handled to place the
3484     // value at element 0, by using two vslide1up instructions in sequence on
3485     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3486     // this.
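    // After the two slides, the low two i32 elements hold {Lo, Hi}, which
    // bitcast back to the desired i64 value in element 0 (little endian).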
3487     SDValue One = DAG.getConstant(1, DL, XLenVT);
3488     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3489     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3490     MVT I32ContainerVT =
3491         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3492     SDValue I32Mask =
3493         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3494     // Limit the active VL to two.
3495     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
    // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3498     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3499                            InsertI64VL);
    // First slide in the hi value, then slide the lo value in underneath it.
3501     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3502                            ValHi, I32Mask, InsertI64VL);
3503     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3504                            ValLo, I32Mask, InsertI64VL);
3505     // Bitcast back to the right container type.
3506     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3507   }
3508 
3509   // Now that the value is in a vector, slide it into position.
3510   SDValue InsertVL =
3511       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3512   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3513                                 ValInVec, Idx, Mask, InsertVL);
3514   if (!VecVT.isFixedLengthVector())
3515     return Slideup;
3516   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3517 }
3518 
3519 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3520 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3521 // types this is done using VMV_X_S to allow us to glean information about the
3522 // sign bits of the result.
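// For example, (extractelt v8i32:v, 3) becomes a vslidedown of 3 with VL=1
// followed by a vmv.x.s of the first element.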
3523 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3524                                                      SelectionDAG &DAG) const {
3525   SDLoc DL(Op);
3526   SDValue Idx = Op.getOperand(1);
3527   SDValue Vec = Op.getOperand(0);
3528   EVT EltVT = Op.getValueType();
3529   MVT VecVT = Vec.getSimpleValueType();
3530   MVT XLenVT = Subtarget.getXLenVT();
3531 
3532   if (VecVT.getVectorElementType() == MVT::i1) {
3533     // FIXME: For now we just promote to an i8 vector and extract from that,
3534     // but this is probably not optimal.
3535     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3536     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3537     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3538   }
3539 
3540   // If this is a fixed vector, we need to convert it to a scalable vector.
3541   MVT ContainerVT = VecVT;
3542   if (VecVT.isFixedLengthVector()) {
3543     ContainerVT = getContainerForFixedLengthVector(VecVT);
3544     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3545   }
3546 
3547   // If the index is 0, the vector is already in the right position.
3548   if (!isNullConstant(Idx)) {
3549     // Use a VL of 1 to avoid processing more elements than we need.
3550     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3551     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3552     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3553     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3554                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3555   }
3556 
3557   if (!EltVT.isInteger()) {
3558     // Floating-point extracts are handled in TableGen.
3559     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3560                        DAG.getConstant(0, DL, XLenVT));
3561   }
3562 
3563   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3564   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3565 }
3566 
3567 // Some RVV intrinsics may claim that they want an integer operand to be
3568 // promoted or expanded.
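// Scalars narrower than XLenVT are promoted (sign-extending constants so
// that the simm5 immediate check in isel can still match a .vi form); an
// i64 scalar on RV32 is either truncated, if it is a sign-extended 32-bit
// constant, or split and assembled into a splat vector operand.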
3569 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3570                                           const RISCVSubtarget &Subtarget) {
3571   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3572           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3573          "Unexpected opcode");
3574 
3575   if (!Subtarget.hasStdExtV())
3576     return SDValue();
3577 
3578   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3579   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3580   SDLoc DL(Op);
3581 
3582   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3583       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3584   if (!II || !II->SplatOperand)
3585     return SDValue();
3586 
3587   unsigned SplatOp = II->SplatOperand + HasChain;
3588   assert(SplatOp < Op.getNumOperands());
3589 
3590   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3591   SDValue &ScalarOp = Operands[SplatOp];
3592   MVT OpVT = ScalarOp.getSimpleValueType();
3593   MVT XLenVT = Subtarget.getXLenVT();
3594 
  // If this isn't a scalar, or its type is XLenVT, we're done.
3596   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3597     return SDValue();
3598 
3599   // Simplest case is that the operand needs to be promoted to XLenVT.
3600   if (OpVT.bitsLT(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
3604     // FIXME: Should we ignore the upper bits in isel instead?
3605     unsigned ExtOpc =
3606         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3607     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3608     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3609   }
3610 
3611   // Use the previous operand to get the vXi64 VT. The result might be a mask
3612   // VT for compares. Using the previous operand assumes that the previous
3613   // operand will never have a smaller element size than a scalar operand and
3614   // that a widening operation never uses SEW=64.
3615   // NOTE: If this fails the below assert, we can probably just find the
3616   // element count from any operand or result and use it to construct the VT.
3617   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3618   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3619 
3620   // The more complex case is when the scalar is larger than XLenVT.
3621   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3622          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3623 
3624   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3625   // on the instruction to sign-extend since SEW>XLEN.
3626   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3627     if (isInt<32>(CVal->getSExtValue())) {
3628       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3629       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3630     }
3631   }
3632 
3633   // We need to convert the scalar to a splat vector.
3634   // FIXME: Can we implicitly truncate the scalar if it is known to
3635   // be sign extended?
3636   // VL should be the last operand.
3637   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3638   assert(VL.getValueType() == XLenVT);
3639   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3640   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3641 }
3642 
3643 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3644                                                      SelectionDAG &DAG) const {
3645   unsigned IntNo = Op.getConstantOperandVal(0);
3646   SDLoc DL(Op);
3647   MVT XLenVT = Subtarget.getXLenVT();
3648 
3649   switch (IntNo) {
3650   default:
3651     break; // Don't custom lower most intrinsics.
3652   case Intrinsic::thread_pointer: {
3653     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3654     return DAG.getRegister(RISCV::X4, PtrVT);
3655   }
3656   case Intrinsic::riscv_orc_b:
3657     // Lower to the GORCI encoding for orc.b.
3658     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3659                        DAG.getConstant(7, DL, XLenVT));
3660   case Intrinsic::riscv_grev:
3661   case Intrinsic::riscv_gorc: {
3662     unsigned Opc =
3663         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3664     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3665   }
3666   case Intrinsic::riscv_shfl:
3667   case Intrinsic::riscv_unshfl: {
3668     unsigned Opc =
3669         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3670     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3671   }
3672   case Intrinsic::riscv_bcompress:
3673   case Intrinsic::riscv_bdecompress: {
3674     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3675                                                        : RISCVISD::BDECOMPRESS;
3676     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3677   }
3678   case Intrinsic::riscv_vmv_x_s:
3679     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3680     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3681                        Op.getOperand(1));
3682   case Intrinsic::riscv_vmv_v_x:
3683     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3684                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3685   case Intrinsic::riscv_vfmv_v_f:
3686     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3687                        Op.getOperand(1), Op.getOperand(2));
3688   case Intrinsic::riscv_vmv_s_x: {
3689     SDValue Scalar = Op.getOperand(2);
3690 
3691     if (Scalar.getValueType().bitsLE(XLenVT)) {
3692       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3693       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3694                          Op.getOperand(1), Scalar, Op.getOperand(3));
3695     }
3696 
3697     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3698 
    // This is an i64 value that lives in two scalar registers. We have to
    // insert it in a convoluted way. First we build a vXi64 splat containing
    // the two values, which we assemble via a stack store and strided load.
    // Next we'll use vid.v and vmseq to build a mask with bit 0 set. Then
    // we'll use that mask to merge element 0 from our splat into the source
    // vector.
3704     // FIXME: This is probably not the best way to do this, but it is
3705     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3706     // point.
3707     //   sw lo, (a0)
3708     //   sw hi, 4(a0)
3709     //   vlse vX, (a0)
3710     //
3711     //   vid.v      vVid
3712     //   vmseq.vx   mMask, vVid, 0
3713     //   vmerge.vvm vDest, vSrc, vVal, mMask
3714     MVT VT = Op.getSimpleValueType();
3715     SDValue Vec = Op.getOperand(1);
3716     SDValue VL = Op.getOperand(3);
3717 
3718     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3719     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3720                                       DAG.getConstant(0, DL, MVT::i32), VL);
3721 
3722     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3723     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3724     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3725     SDValue SelectCond =
3726         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3727                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3728     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3729                        Vec, VL);
3730   }
3731   case Intrinsic::riscv_vslide1up:
3732   case Intrinsic::riscv_vslide1down:
3733   case Intrinsic::riscv_vslide1up_mask:
3734   case Intrinsic::riscv_vslide1down_mask: {
3735     // We need to special case these when the scalar is larger than XLen.
3736     unsigned NumOps = Op.getNumOperands();
3737     bool IsMasked = NumOps == 6;
3738     unsigned OpOffset = IsMasked ? 1 : 0;
3739     SDValue Scalar = Op.getOperand(2 + OpOffset);
3740     if (Scalar.getValueType().bitsLE(XLenVT))
3741       break;
3742 
    // Splatting a sign-extended constant is fine.
3744     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3745       if (isInt<32>(CVal->getSExtValue()))
3746         break;
3747 
3748     MVT VT = Op.getSimpleValueType();
3749     assert(VT.getVectorElementType() == MVT::i64 &&
3750            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3751 
3752     // Convert the vector source to the equivalent nxvXi32 vector.
3753     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3754     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3755 
3756     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3757                                    DAG.getConstant(0, DL, XLenVT));
3758     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3759                                    DAG.getConstant(1, DL, XLenVT));
3760 
3761     // Double the VL since we halved SEW.
3762     SDValue VL = Op.getOperand(NumOps - 1);
3763     SDValue I32VL =
3764         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3765 
3766     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3767     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3768 
3769     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3770     // instructions.
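    // For example, for vslide1up the halves are slid in high half first:
    //   vslide1up.vx vTmp,  vSrc, hi   ; [hi, src0.lo, src0.hi, ...]
    //   vslide1up.vx vDest, vTmp, lo   ; [lo, hi, src0.lo, src0.hi, ...]
    // so that, viewed as i64 elements, element 0 becomes (hi << 32) | lo.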
3771     if (IntNo == Intrinsic::riscv_vslide1up ||
3772         IntNo == Intrinsic::riscv_vslide1up_mask) {
3773       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3774                         I32Mask, I32VL);
3775       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3776                         I32Mask, I32VL);
3777     } else {
3778       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3779                         I32Mask, I32VL);
3780       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3781                         I32Mask, I32VL);
3782     }
3783 
3784     // Convert back to nxvXi64.
3785     Vec = DAG.getBitcast(VT, Vec);
3786 
3787     if (!IsMasked)
3788       return Vec;
3789 
3790     // Apply mask after the operation.
3791     SDValue Mask = Op.getOperand(NumOps - 2);
3792     SDValue MaskedOff = Op.getOperand(1);
3793     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3794   }
3795   }
3796 
3797   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3798 }
3799 
3800 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3801                                                     SelectionDAG &DAG) const {
3802   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3803 }
3804 
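// Returns the LMUL=1 scalable vector type with the same element type as VT,
// i.e. the type that occupies exactly one vector register. For example,
// assuming RVVBitsPerBlock is 64, getLMUL1VT(nxv8i32) returns nxv2i32.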
3805 static MVT getLMUL1VT(MVT VT) {
3806   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3807          "Unexpected vector MVT");
3808   return MVT::getScalableVectorVT(
3809       VT.getVectorElementType(),
3810       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3811 }
3812 
3813 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3814   switch (ISDOpcode) {
3815   default:
3816     llvm_unreachable("Unhandled reduction");
3817   case ISD::VECREDUCE_ADD:
3818     return RISCVISD::VECREDUCE_ADD_VL;
3819   case ISD::VECREDUCE_UMAX:
3820     return RISCVISD::VECREDUCE_UMAX_VL;
3821   case ISD::VECREDUCE_SMAX:
3822     return RISCVISD::VECREDUCE_SMAX_VL;
3823   case ISD::VECREDUCE_UMIN:
3824     return RISCVISD::VECREDUCE_UMIN_VL;
3825   case ISD::VECREDUCE_SMIN:
3826     return RISCVISD::VECREDUCE_SMIN_VL;
3827   case ISD::VECREDUCE_AND:
3828     return RISCVISD::VECREDUCE_AND_VL;
3829   case ISD::VECREDUCE_OR:
3830     return RISCVISD::VECREDUCE_OR_VL;
3831   case ISD::VECREDUCE_XOR:
3832     return RISCVISD::VECREDUCE_XOR_VL;
3833   }
3834 }
3835 
3836 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3837                                                       SelectionDAG &DAG) const {
3838   SDLoc DL(Op);
3839   SDValue Vec = Op.getOperand(0);
3840   MVT VecVT = Vec.getSimpleValueType();
3841   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3842           Op.getOpcode() == ISD::VECREDUCE_OR ||
3843           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3844          "Unexpected reduction lowering");
3845 
3846   MVT XLenVT = Subtarget.getXLenVT();
3847   assert(Op.getValueType() == XLenVT &&
3848          "Expected reduction output to be legalized to XLenVT");
3849 
3850   MVT ContainerVT = VecVT;
3851   if (VecVT.isFixedLengthVector()) {
3852     ContainerVT = getContainerForFixedLengthVector(VecVT);
3853     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3854   }
3855 
3856   SDValue Mask, VL;
3857   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3858   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3859 
3860   switch (Op.getOpcode()) {
3861   default:
3862     llvm_unreachable("Unhandled reduction");
3863   case ISD::VECREDUCE_AND:
3864     // vpopc ~x == 0
3865     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3866     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3867     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3868   case ISD::VECREDUCE_OR:
3869     // vpopc x != 0
3870     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3871     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3872   case ISD::VECREDUCE_XOR: {
3873     // ((vpopc x) & 1) != 0
3874     SDValue One = DAG.getConstant(1, DL, XLenVT);
3875     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3876     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3877     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3878   }
3879   }
3880 }
3881 
3882 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3883                                             SelectionDAG &DAG) const {
3884   SDLoc DL(Op);
3885   SDValue Vec = Op.getOperand(0);
3886   EVT VecEVT = Vec.getValueType();
3887 
3888   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3889 
  // Due to the ordering in type legalization we may have a vector type that
  // needs to be split. Do that manually so we can get down to a legal type.
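  // For example, if v32i32 is the widest legal fixed-length vector type, a
  // v64i32 VECREDUCE_ADD is first reduced to an ADD of two v32i32 halves.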
3892   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3893          TargetLowering::TypeSplitVector) {
3894     SDValue Lo, Hi;
3895     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3896     VecEVT = Lo.getValueType();
3897     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3898   }
3899 
3900   // TODO: The type may need to be widened rather than split. Or widened before
3901   // it can be split.
3902   if (!isTypeLegal(VecEVT))
3903     return SDValue();
3904 
3905   MVT VecVT = VecEVT.getSimpleVT();
3906   MVT VecEltVT = VecVT.getVectorElementType();
3907   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3908 
3909   MVT ContainerVT = VecVT;
3910   if (VecVT.isFixedLengthVector()) {
3911     ContainerVT = getContainerForFixedLengthVector(VecVT);
3912     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3913   }
3914 
3915   MVT M1VT = getLMUL1VT(ContainerVT);
3916 
3917   SDValue Mask, VL;
3918   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3919 
3920   // FIXME: This is a VLMAX splat which might be too large and can prevent
3921   // vsetvli removal.
3922   SDValue NeutralElem =
3923       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3924   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3925   SDValue Reduction =
3926       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3927   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3928                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3929   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3930 }
3931 
3932 // Given a reduction op, this function returns the matching reduction opcode,
3933 // the vector SDValue and the scalar SDValue required to lower this to a
3934 // RISCVISD node.
3935 static std::tuple<unsigned, SDValue, SDValue>
3936 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3937   SDLoc DL(Op);
3938   auto Flags = Op->getFlags();
3939   unsigned Opcode = Op.getOpcode();
3940   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3941   switch (Opcode) {
3942   default:
3943     llvm_unreachable("Unhandled reduction");
3944   case ISD::VECREDUCE_FADD:
3945     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3946                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3947   case ISD::VECREDUCE_SEQ_FADD:
3948     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3949                            Op.getOperand(0));
3950   case ISD::VECREDUCE_FMIN:
3951     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3952                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3953   case ISD::VECREDUCE_FMAX:
3954     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3955                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3956   }
3957 }
3958 
3959 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3960                                               SelectionDAG &DAG) const {
3961   SDLoc DL(Op);
3962   MVT VecEltVT = Op.getSimpleValueType();
3963 
3964   unsigned RVVOpcode;
3965   SDValue VectorVal, ScalarVal;
3966   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3967       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3968   MVT VecVT = VectorVal.getSimpleValueType();
3969 
3970   MVT ContainerVT = VecVT;
3971   if (VecVT.isFixedLengthVector()) {
3972     ContainerVT = getContainerForFixedLengthVector(VecVT);
3973     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3974   }
3975 
3976   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3977 
3978   SDValue Mask, VL;
3979   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3980 
3981   // FIXME: This is a VLMAX splat which might be too large and can prevent
3982   // vsetvli removal.
3983   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3984   SDValue Reduction =
3985       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3986   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3987                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3988 }
3989 
3990 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3991                                                    SelectionDAG &DAG) const {
3992   SDValue Vec = Op.getOperand(0);
3993   SDValue SubVec = Op.getOperand(1);
3994   MVT VecVT = Vec.getSimpleValueType();
3995   MVT SubVecVT = SubVec.getSimpleValueType();
3996 
3997   SDLoc DL(Op);
3998   MVT XLenVT = Subtarget.getXLenVT();
3999   unsigned OrigIdx = Op.getConstantOperandVal(2);
4000   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4001 
4002   // We don't have the ability to slide mask vectors up indexed by their i1
4003   // elements; the smallest we can do is i8. Often we are able to bitcast to
4004   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4005   // into a scalable one, we might not necessarily have enough scalable
4006   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
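  // For example, inserting nxv8i1 at index 8 into nxv64i1 can instead be
  // lowered as inserting nxv1i8 at index 1 into nxv8i8.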
4007   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4008       (OrigIdx != 0 || !Vec.isUndef())) {
4009     if (VecVT.getVectorMinNumElements() >= 8 &&
4010         SubVecVT.getVectorMinNumElements() >= 8) {
4011       assert(OrigIdx % 8 == 0 && "Invalid index");
4012       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4013              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4014              "Unexpected mask vector lowering");
4015       OrigIdx /= 8;
4016       SubVecVT =
4017           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4018                            SubVecVT.isScalableVector());
4019       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4020                                VecVT.isScalableVector());
4021       Vec = DAG.getBitcast(VecVT, Vec);
4022       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4023     } else {
4024       // We can't slide this mask vector up indexed by its i1 elements.
4025       // This poses a problem when we wish to insert a scalable vector which
4026       // can't be re-expressed as a larger type. Just choose the slow path and
4027       // extend to a larger type, then truncate back down.
4028       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4029       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4030       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4031       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4032       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4033                         Op.getOperand(2));
4034       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4035       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4036     }
4037   }
4038 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
4042   // register size. Therefore we must slide the vector group up the full
4043   // amount.
4044   if (SubVecVT.isFixedLengthVector()) {
4045     if (OrigIdx == 0 && Vec.isUndef())
4046       return Op;
4047     MVT ContainerVT = VecVT;
4048     if (VecVT.isFixedLengthVector()) {
4049       ContainerVT = getContainerForFixedLengthVector(VecVT);
4050       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4051     }
4052     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4053                          DAG.getUNDEF(ContainerVT), SubVec,
4054                          DAG.getConstant(0, DL, XLenVT));
4055     SDValue Mask =
4056         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4057     // Set the vector length to only the number of elements we care about. Note
4058     // that for slideup this includes the offset.
4059     SDValue VL =
4060         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4061     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4062     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4063                                   SubVec, SlideupAmt, Mask, VL);
4064     if (VecVT.isFixedLengthVector())
4065       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4066     return DAG.getBitcast(Op.getValueType(), Slideup);
4067   }
4068 
4069   unsigned SubRegIdx, RemIdx;
4070   std::tie(SubRegIdx, RemIdx) =
4071       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4072           VecVT, SubVecVT, OrigIdx, TRI);
4073 
4074   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4075   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4076                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4077                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4078 
4079   // 1. If the Idx has been completely eliminated and this subvector's size is
4080   // a vector register or a multiple thereof, or the surrounding elements are
4081   // undef, then this is a subvector insert which naturally aligns to a vector
4082   // register. These can easily be handled using subregister manipulation.
4083   // 2. If the subvector is smaller than a vector register, then the insertion
4084   // must preserve the undisturbed elements of the register. We do this by
4085   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4086   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4087   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4088   // LMUL=1 type back into the larger vector (resolving to another subregister
4089   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4090   // to avoid allocating a large register group to hold our subvector.
4091   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4092     return Op;
4093 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
4096   // (in our case undisturbed). This means we can set up a subvector insertion
4097   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4098   // size of the subvector.
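  // For example, inserting nxv1i32 into nxv2i32 at index 1 gives RemIdx == 1,
  // so we slide up with OFFSET = 1*vscale and VL = (1+1)*vscale: elements
  // [0, vscale) are preserved and the subvector lands in [vscale, 2*vscale).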
4099   MVT InterSubVT = VecVT;
4100   SDValue AlignedExtract = Vec;
4101   unsigned AlignedIdx = OrigIdx - RemIdx;
4102   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4103     InterSubVT = getLMUL1VT(VecVT);
    // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
4106     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4107                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4108   }
4109 
4110   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4111   // For scalable vectors this must be further multiplied by vscale.
4112   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4113 
4114   SDValue Mask, VL;
4115   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4116 
4117   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4118   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4119   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4120   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4121 
4122   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4123                        DAG.getUNDEF(InterSubVT), SubVec,
4124                        DAG.getConstant(0, DL, XLenVT));
4125 
4126   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4127                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4128 
4129   // If required, insert this subvector back into the correct vector register.
4130   // This should resolve to an INSERT_SUBREG instruction.
4131   if (VecVT.bitsGT(InterSubVT))
4132     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4133                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4134 
4135   // We might have bitcast from a mask type: cast back to the original type if
4136   // required.
4137   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4138 }
4139 
4140 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4141                                                     SelectionDAG &DAG) const {
4142   SDValue Vec = Op.getOperand(0);
4143   MVT SubVecVT = Op.getSimpleValueType();
4144   MVT VecVT = Vec.getSimpleValueType();
4145 
4146   SDLoc DL(Op);
4147   MVT XLenVT = Subtarget.getXLenVT();
4148   unsigned OrigIdx = Op.getConstantOperandVal(1);
4149   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4150 
4151   // We don't have the ability to slide mask vectors down indexed by their i1
4152   // elements; the smallest we can do is i8. Often we are able to bitcast to
4153   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4154   // from a scalable one, we might not necessarily have enough scalable
4155   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
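  // For example, extracting nxv8i1 at index 8 from nxv64i1 can instead be
  // lowered as extracting nxv1i8 at index 1 from nxv8i8.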
4156   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4157     if (VecVT.getVectorMinNumElements() >= 8 &&
4158         SubVecVT.getVectorMinNumElements() >= 8) {
4159       assert(OrigIdx % 8 == 0 && "Invalid index");
4160       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4161              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4162              "Unexpected mask vector lowering");
4163       OrigIdx /= 8;
4164       SubVecVT =
4165           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4166                            SubVecVT.isScalableVector());
4167       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4168                                VecVT.isScalableVector());
4169       Vec = DAG.getBitcast(VecVT, Vec);
4170     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
      // This poses a problem when we wish to extract a scalable vector which
      // can't be re-expressed as a larger type. Just choose the slow path and
      // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain fixed
      // vectors from fixed vectors, where we can extract as i8 and shift the
      // correct element right to reach the desired subvector.
4178       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4179       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4180       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4181       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4182                         Op.getOperand(1));
4183       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4184       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4185     }
4186   }
4187 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
4191   // register size. Therefore we must slide the vector group down the full
4192   // amount.
4193   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
4196     if (OrigIdx == 0)
4197       return Op;
4198     MVT ContainerVT = VecVT;
4199     if (VecVT.isFixedLengthVector()) {
4200       ContainerVT = getContainerForFixedLengthVector(VecVT);
4201       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4202     }
4203     SDValue Mask =
4204         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4205     // Set the vector length to only the number of elements we care about. This
4206     // avoids sliding down elements we're going to discard straight away.
4207     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4208     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4209     SDValue Slidedown =
4210         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4211                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4212     // Now we can use a cast-like subvector extract to get the result.
4213     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4214                             DAG.getConstant(0, DL, XLenVT));
4215     return DAG.getBitcast(Op.getValueType(), Slidedown);
4216   }
4217 
4218   unsigned SubRegIdx, RemIdx;
4219   std::tie(SubRegIdx, RemIdx) =
4220       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4221           VecVT, SubVecVT, OrigIdx, TRI);
4222 
4223   // If the Idx has been completely eliminated then this is a subvector extract
4224   // which naturally aligns to a vector register. These can easily be handled
4225   // using subregister manipulation.
4226   if (RemIdx == 0)
4227     return Op;
4228 
4229   // Else we must shift our vector register directly to extract the subvector.
4230   // Do this using VSLIDEDOWN.
4231 
4232   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an
  // EXTRACT_SUBREG instruction.
4235   MVT InterSubVT = VecVT;
4236   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4237     InterSubVT = getLMUL1VT(VecVT);
4238     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4239                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4240   }
4241 
4242   // Slide this vector register down by the desired number of elements in order
4243   // to place the desired subvector starting at element 0.
4244   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4245   // For scalable vectors this must be further multiplied by vscale.
4246   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4247 
4248   SDValue Mask, VL;
4249   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4250   SDValue Slidedown =
4251       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4252                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4253 
4254   // Now the vector is in the right position, extract our final subvector. This
4255   // should resolve to a COPY.
4256   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4257                           DAG.getConstant(0, DL, XLenVT));
4258 
4259   // We might have bitcast from a mask type: cast back to the original type if
4260   // required.
4261   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4262 }
4263 
// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
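// For example, a step of 4 lowers to vid.v followed by a shift left by 2,
// while a step of 3 lowers to vid.v followed by a multiply by a splat of 3.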
4266 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4267                                               SelectionDAG &DAG) const {
4268   SDLoc DL(Op);
4269   MVT VT = Op.getSimpleValueType();
4270   MVT XLenVT = Subtarget.getXLenVT();
4271   SDValue Mask, VL;
4272   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4273   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4274   uint64_t StepValImm = Op.getConstantOperandVal(0);
4275   if (StepValImm != 1) {
4276     if (isPowerOf2_64(StepValImm)) {
4277       SDValue StepVal =
4278           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4279                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4280       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4281     } else {
4282       SDValue StepVal = lowerScalarSplat(
4283           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4284           DL, DAG, Subtarget);
4285       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4286     }
4287   }
4288   return StepVec;
4289 }
4290 
4291 // Implement vector_reverse using vrgather.vv with indices determined by
4292 // subtracting the id of each element from (VLMAX-1). This will convert
4293 // the indices like so:
4294 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4295 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4296 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4297                                                  SelectionDAG &DAG) const {
4298   SDLoc DL(Op);
4299   MVT VecVT = Op.getSimpleValueType();
4300   unsigned EltSize = VecVT.getScalarSizeInBits();
4301   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4302 
4303   unsigned MaxVLMAX = 0;
4304   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4305   if (VectorBitsMax != 0)
4306     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4307 
4308   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4309   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4310 
4311   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4312   // to use vrgatherei16.vv.
4313   // TODO: It's also possible to use vrgatherei16.vv for other types to
4314   // decrease register width for the index calculation.
4315   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer requires
    // vrgatherei16.vv.
4320     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4321       SDValue Lo, Hi;
4322       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4323       EVT LoVT, HiVT;
4324       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4325       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4326       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4327       // Reassemble the low and high pieces reversed.
4328       // FIXME: This is a CONCAT_VECTORS.
4329       SDValue Res =
4330           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4331                       DAG.getIntPtrConstant(0, DL));
4332       return DAG.getNode(
4333           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4334           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4335     }
4336 
4337     // Just promote the int type to i16 which will double the LMUL.
4338     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4339     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4340   }
4341 
4342   MVT XLenVT = Subtarget.getXLenVT();
4343   SDValue Mask, VL;
4344   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4345 
4346   // Calculate VLMAX-1 for the desired SEW.
4347   unsigned MinElts = VecVT.getVectorMinNumElements();
4348   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4349                               DAG.getConstant(MinElts, DL, XLenVT));
4350   SDValue VLMinus1 =
4351       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4352 
4353   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4354   bool IsRV32E64 =
4355       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4356   SDValue SplatVL;
4357   if (!IsRV32E64)
4358     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4359   else
4360     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4361 
4362   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4363   SDValue Indices =
4364       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4365 
4366   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4367 }
4368 
4369 SDValue
4370 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4371                                                      SelectionDAG &DAG) const {
4372   SDLoc DL(Op);
4373   auto *Load = cast<LoadSDNode>(Op);
4374 
4375   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4376                                         Load->getMemoryVT(),
4377                                         *Load->getMemOperand()) &&
4378          "Expecting a correctly-aligned load");
4379 
4380   MVT VT = Op.getSimpleValueType();
4381   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4382 
4383   SDValue VL =
4384       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4385 
4386   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4387   SDValue NewLoad = DAG.getMemIntrinsicNode(
4388       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4389       Load->getMemoryVT(), Load->getMemOperand());
4390 
4391   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4392   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4393 }
4394 
4395 SDValue
4396 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4397                                                       SelectionDAG &DAG) const {
4398   SDLoc DL(Op);
4399   auto *Store = cast<StoreSDNode>(Op);
4400 
4401   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4402                                         Store->getMemoryVT(),
4403                                         *Store->getMemOperand()) &&
4404          "Expecting a correctly-aligned store");
4405 
4406   SDValue StoreVal = Store->getValue();
4407   MVT VT = StoreVal.getSimpleValueType();
4408 
  // If the size is less than a byte, we need to pad with zeros to make a byte.
4410   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4411     VT = MVT::v8i1;
4412     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4413                            DAG.getConstant(0, DL, VT), StoreVal,
4414                            DAG.getIntPtrConstant(0, DL));
4415   }
4416 
4417   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4418 
4419   SDValue VL =
4420       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4421 
4422   SDValue NewValue =
4423       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4424   return DAG.getMemIntrinsicNode(
4425       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4426       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4427       Store->getMemoryVT(), Store->getMemOperand());
4428 }
4429 
4430 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4431   auto *Load = cast<MaskedLoadSDNode>(Op);
4432 
4433   SDLoc DL(Op);
4434   MVT VT = Op.getSimpleValueType();
4435   MVT XLenVT = Subtarget.getXLenVT();
4436 
4437   SDValue Mask = Load->getMask();
4438   SDValue PassThru = Load->getPassThru();
4439   SDValue VL;
4440 
4441   MVT ContainerVT = VT;
4442   if (VT.isFixedLengthVector()) {
4443     ContainerVT = getContainerForFixedLengthVector(VT);
4444     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4445 
4446     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4447     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4448     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4449   } else
4450     VL = DAG.getRegister(RISCV::X0, XLenVT);
4451 
4452   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4453   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4454   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4455                    Load->getBasePtr(), Mask,  VL};
4456   SDValue Result =
4457       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4458                               Load->getMemoryVT(), Load->getMemOperand());
4459   SDValue Chain = Result.getValue(1);
4460 
4461   if (VT.isFixedLengthVector())
4462     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4463 
4464   return DAG.getMergeValues({Result, Chain}, DL);
4465 }
4466 
4467 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4468   auto *Store = cast<MaskedStoreSDNode>(Op);
4469 
4470   SDLoc DL(Op);
4471   SDValue Val = Store->getValue();
4472   SDValue Mask = Store->getMask();
4473   MVT VT = Val.getSimpleValueType();
4474   MVT XLenVT = Subtarget.getXLenVT();
4475   SDValue VL;
4476 
4477   MVT ContainerVT = VT;
4478   if (VT.isFixedLengthVector()) {
4479     ContainerVT = getContainerForFixedLengthVector(VT);
4480     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4481 
4482     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4483     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4484     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4485   } else
4486     VL = DAG.getRegister(RISCV::X0, XLenVT);
4487 
4488   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4489   return DAG.getMemIntrinsicNode(
4490       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4491       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4492       Store->getMemoryVT(), Store->getMemOperand());
4493 }
4494 
4495 SDValue
4496 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4497                                                       SelectionDAG &DAG) const {
4498   MVT InVT = Op.getOperand(0).getSimpleValueType();
4499   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4500 
4501   MVT VT = Op.getSimpleValueType();
4502 
4503   SDValue Op1 =
4504       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4505   SDValue Op2 =
4506       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4507 
4508   SDLoc DL(Op);
4509   SDValue VL =
4510       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4511 
4512   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4513   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4514 
4515   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4516                             Op.getOperand(2), Mask, VL);
4517 
4518   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4519 }
4520 
4521 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4522     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4523   MVT VT = Op.getSimpleValueType();
4524 
4525   if (VT.getVectorElementType() == MVT::i1)
4526     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4527 
4528   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4529 }
4530 
4531 SDValue
4532 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4533                                                       SelectionDAG &DAG) const {
4534   unsigned Opc;
4535   switch (Op.getOpcode()) {
4536   default: llvm_unreachable("Unexpected opcode!");
4537   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4538   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4539   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4540   }
4541 
4542   return lowerToScalableOp(Op, DAG, Opc);
4543 }
4544 
4545 // Lower vector ABS to smax(X, sub(0, X)).
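// For example, for X = <2, -3>, sub(0, X) = <-2, 3> and the elementwise smax
// yields <2, 3>.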
4546 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4547   SDLoc DL(Op);
4548   MVT VT = Op.getSimpleValueType();
4549   SDValue X = Op.getOperand(0);
4550 
4551   assert(VT.isFixedLengthVector() && "Unexpected type");
4552 
4553   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4554   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4555 
4556   SDValue Mask, VL;
4557   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4558 
4559   SDValue SplatZero =
4560       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4561                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4562   SDValue NegX =
4563       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4564   SDValue Max =
4565       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4566 
4567   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4568 }
4569 
4570 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4571     SDValue Op, SelectionDAG &DAG) const {
4572   SDLoc DL(Op);
4573   MVT VT = Op.getSimpleValueType();
4574   SDValue Mag = Op.getOperand(0);
4575   SDValue Sign = Op.getOperand(1);
4576   assert(Mag.getValueType() == Sign.getValueType() &&
4577          "Can only handle COPYSIGN with matching types.");
4578 
4579   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4580   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4581   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4582 
4583   SDValue Mask, VL;
4584   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4585 
4586   SDValue CopySign =
4587       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4588 
4589   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4590 }
4591 
4592 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4593     SDValue Op, SelectionDAG &DAG) const {
4594   MVT VT = Op.getSimpleValueType();
4595   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4596 
4597   MVT I1ContainerVT =
4598       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4599 
4600   SDValue CC =
4601       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4602   SDValue Op1 =
4603       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4604   SDValue Op2 =
4605       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4606 
4607   SDLoc DL(Op);
4608   SDValue Mask, VL;
4609   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4610 
4611   SDValue Select =
4612       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4613 
4614   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4615 }
4616 
4617 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4618                                                unsigned NewOpc,
4619                                                bool HasMask) const {
4620   MVT VT = Op.getSimpleValueType();
4621   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4622 
4623   // Create list of operands by converting existing ones to scalable types.
4624   SmallVector<SDValue, 6> Ops;
4625   for (const SDValue &V : Op->op_values()) {
4626     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4627 
4628     // Pass through non-vector operands.
4629     if (!V.getValueType().isVector()) {
4630       Ops.push_back(V);
4631       continue;
4632     }
4633 
4634     // "cast" fixed length vector to a scalable vector.
4635     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4636            "Only fixed length vectors are supported!");
4637     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4638   }
4639 
4640   SDLoc DL(Op);
4641   SDValue Mask, VL;
4642   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4643   if (HasMask)
4644     Ops.push_back(Mask);
4645   Ops.push_back(VL);
4646 
4647   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4648   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4649 }
4650 
4651 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4652 // * Operands of each node are assumed to be in the same order.
4653 // * The EVL operand is promoted from i32 to i64 on RV64.
4654 // * Fixed-length vectors are converted to their scalable-vector container
4655 //   types.
4656 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4657                                        unsigned RISCVISDOpc) const {
4658   SDLoc DL(Op);
4659   MVT VT = Op.getSimpleValueType();
4660   SmallVector<SDValue, 4> Ops;
4661 
4662   for (const auto &OpIdx : enumerate(Op->ops())) {
4663     SDValue V = OpIdx.value();
4664     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4665     // Pass through operands which aren't fixed-length vectors.
4666     if (!V.getValueType().isFixedLengthVector()) {
4667       Ops.push_back(V);
4668       continue;
4669     }
4670     // "cast" fixed length vector to a scalable vector.
4671     MVT OpVT = V.getSimpleValueType();
4672     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4673     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4674            "Only fixed length vectors are supported!");
4675     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4676   }
4677 
4678   if (!VT.isFixedLengthVector())
4679     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4680 
4681   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4682 
4683   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4684 
4685   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4686 }
4687 
// Custom lower MGATHER to a legalized form for RVV. It will then be matched
// to an RVV indexed load. The RVV indexed load instructions only support the
4690 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4691 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4692 // indexing is extended to the XLEN value type and scaled accordingly.
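// When the mask is known to be all ones, the unmasked riscv_vluxei intrinsic
// is emitted; otherwise riscv_vluxei_mask is used and the pass-thru operand is
// carried through.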
4693 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4694   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4695   SDLoc DL(Op);
4696 
4697   SDValue Index = MGN->getIndex();
4698   SDValue Mask = MGN->getMask();
4699   SDValue PassThru = MGN->getPassThru();
4700 
4701   MVT VT = Op.getSimpleValueType();
4702   MVT IndexVT = Index.getSimpleValueType();
4703   MVT XLenVT = Subtarget.getXLenVT();
4704 
4705   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4706          "Unexpected VTs!");
4707   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4708          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
4710   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4711          "Unexpected extending MGATHER");
4712 
4713   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4714   // the selection of the masked intrinsics doesn't do this for us.
4715   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4716 
4717   SDValue VL;
4718   MVT ContainerVT = VT;
4719   if (VT.isFixedLengthVector()) {
4720     // We need to use the larger of the result and index type to determine the
4721     // scalable type to use so we don't increase LMUL for any operand/result.
4722     if (VT.bitsGE(IndexVT)) {
4723       ContainerVT = getContainerForFixedLengthVector(VT);
4724       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4725                                  ContainerVT.getVectorElementCount());
4726     } else {
4727       IndexVT = getContainerForFixedLengthVector(IndexVT);
4728       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4729                                      IndexVT.getVectorElementCount());
4730     }
4731 
4732     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4733 
4734     if (!IsUnmasked) {
4735       MVT MaskVT =
4736           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4737       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4738       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4739     }
4740 
4741     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4742   } else
4743     VL = DAG.getRegister(RISCV::X0, XLenVT);
4744 
4745   unsigned IntID =
4746       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
4747   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4748                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4749   if (!IsUnmasked)
4750     Ops.push_back(PassThru);
4751   Ops.push_back(MGN->getBasePtr());
4752   Ops.push_back(Index);
4753   if (!IsUnmasked)
4754     Ops.push_back(Mask);
4755   Ops.push_back(VL);
4756 
4757   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4758   SDValue Result =
4759       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4760                               MGN->getMemoryVT(), MGN->getMemOperand());
4761   SDValue Chain = Result.getValue(1);
4762 
4763   if (VT.isFixedLengthVector())
4764     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4765 
4766   return DAG.getMergeValues({Result, Chain}, DL);
4767 }
4768 
// Custom lower MSCATTER to a legalized form for RVV. It will then be matched
// to an RVV indexed store. The RVV indexed store instructions only support the
4771 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4772 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4773 // indexing is extended to the XLEN value type and scaled accordingly.
4774 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4775                                            SelectionDAG &DAG) const {
4776   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4777   SDLoc DL(Op);
4778   SDValue Index = MSN->getIndex();
4779   SDValue Mask = MSN->getMask();
4780   SDValue Val = MSN->getValue();
4781 
4782   MVT VT = Val.getSimpleValueType();
4783   MVT IndexVT = Index.getSimpleValueType();
4784   MVT XLenVT = Subtarget.getXLenVT();
4785 
4786   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4787          "Unexpected VTs!");
4788   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4789          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
4792   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4793 
4794   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4795   // the selection of the masked intrinsics doesn't do this for us.
4796   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4797 
4798   SDValue VL;
4799   if (VT.isFixedLengthVector()) {
4800     // We need to use the larger of the value and index type to determine the
4801     // scalable type to use so we don't increase LMUL for any operand/result.
4802     MVT ContainerVT;
4803     if (VT.bitsGE(IndexVT)) {
4804       ContainerVT = getContainerForFixedLengthVector(VT);
4805       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4806                                  ContainerVT.getVectorElementCount());
4807     } else {
4808       IndexVT = getContainerForFixedLengthVector(IndexVT);
4809       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4810                                      IndexVT.getVectorElementCount());
4811     }
4812 
4813     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4814     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4815 
4816     if (!IsUnmasked) {
4817       MVT MaskVT =
4818           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4819       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4820     }
4821 
4822     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4823   } else
4824     VL = DAG.getRegister(RISCV::X0, XLenVT);
4825 
4826   unsigned IntID =
4827       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4828   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4829                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4830   Ops.push_back(Val);
4831   Ops.push_back(MSN->getBasePtr());
4832   Ops.push_back(Index);
4833   if (!IsUnmasked)
4834     Ops.push_back(Mask);
4835   Ops.push_back(VL);
4836 
4837   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4838                                  MSN->getMemoryVT(), MSN->getMemOperand());
4839 }
4840 
4841 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4842                                                SelectionDAG &DAG) const {
4843   const MVT XLenVT = Subtarget.getXLenVT();
4844   SDLoc DL(Op);
4845   SDValue Chain = Op->getOperand(0);
4846   SDValue SysRegNo = DAG.getConstant(
4847       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4848   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4849   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4850 
  // The encoding used for the rounding mode in RISCV differs from that used
  // by FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each holding
  // the corresponding FLT_ROUNDS mode.
4855   static const int Table =
4856       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4857       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4858       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4859       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4860       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
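  // For example, reading FRM == RISCVFPRndMode::RTZ (1) shifts the table right
  // by 1 * 4 and extracts RoundingMode::TowardZero (0), the FLT_ROUNDS
  // encoding for round-toward-zero.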
4861 
4862   SDValue Shift =
4863       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4864   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4865                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4866   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4867                                DAG.getConstant(7, DL, XLenVT));
4868 
4869   return DAG.getMergeValues({Masked, Chain}, DL);
4870 }
4871 
4872 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4873                                                SelectionDAG &DAG) const {
4874   const MVT XLenVT = Subtarget.getXLenVT();
4875   SDLoc DL(Op);
4876   SDValue Chain = Op->getOperand(0);
4877   SDValue RMValue = Op->getOperand(1);
4878   SDValue SysRegNo = DAG.getConstant(
4879       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4880 
  // The encoding used for the rounding mode in RISCV differs from that used
  // by FLT_ROUNDS. To convert it, the C rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each holding
  // the corresponding RISCV mode.
4885   static const unsigned Table =
4886       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4887       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4888       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4889       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4890       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
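  // For example, an FLT_ROUNDS value of RoundingMode::TowardNegative (3)
  // selects field 3 of the table, RISCVFPRndMode::RDN (2), which is then
  // written to FRM.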
4891 
4892   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4893                               DAG.getConstant(2, DL, XLenVT));
4894   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4895                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4896   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4897                         DAG.getConstant(0x7, DL, XLenVT));
4898   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4899                      RMValue);
4900 }
4901 
4902 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4903 // form of the given Opcode.
4904 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4905   switch (Opcode) {
4906   default:
4907     llvm_unreachable("Unexpected opcode");
4908   case ISD::SHL:
4909     return RISCVISD::SLLW;
4910   case ISD::SRA:
4911     return RISCVISD::SRAW;
4912   case ISD::SRL:
4913     return RISCVISD::SRLW;
4914   case ISD::SDIV:
4915     return RISCVISD::DIVW;
4916   case ISD::UDIV:
4917     return RISCVISD::DIVUW;
4918   case ISD::UREM:
4919     return RISCVISD::REMUW;
4920   case ISD::ROTL:
4921     return RISCVISD::ROLW;
4922   case ISD::ROTR:
4923     return RISCVISD::RORW;
4924   case RISCVISD::GREV:
4925     return RISCVISD::GREVW;
4926   case RISCVISD::GORC:
4927     return RISCVISD::GORCW;
4928   }
4929 }
4930 
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 aren't legal types for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
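// For example, (i32 (shl x, y)) becomes
// (i32 (trunc (sllw (any_extend x), (any_extend y)))).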
4936 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4937                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4938   SDLoc DL(N);
4939   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4940   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4941   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4942   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4943   // ReplaceNodeResults requires we maintain the same type for the return value.
4944   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4945 }
4946 
4947 // Converts the given 32-bit operation to an i64 operation with sign extension
4948 // semantics so that later sign extension instructions can be removed.
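     // For example, (add i32 a, b) becomes
     //   (trunc i32 (sext_inreg (add i64 (anyext a), (anyext b)), i32))
     // which selects to ADDW, and the sext_inreg lets later sign extensions of
     // the result be removed.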
4949 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4950   SDLoc DL(N);
4951   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4952   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4953   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4954   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4955                                DAG.getValueType(MVT::i32));
4956   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4957 }
4958 
4959 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4960                                              SmallVectorImpl<SDValue> &Results,
4961                                              SelectionDAG &DAG) const {
4962   SDLoc DL(N);
4963   switch (N->getOpcode()) {
4964   default:
4965     llvm_unreachable("Don't know how to custom type legalize this operation!");
4966   case ISD::STRICT_FP_TO_SINT:
4967   case ISD::STRICT_FP_TO_UINT:
4968   case ISD::FP_TO_SINT:
4969   case ISD::FP_TO_UINT: {
4970     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4971            "Unexpected custom legalisation");
4972     bool IsStrict = N->isStrictFPOpcode();
4973     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
4974                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
4975     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4976     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4977         TargetLowering::TypeSoftenFloat) {
4978       // FIXME: Support strict FP.
4979       if (IsStrict)
4980         return;
4981       if (!isTypeLegal(Op0.getValueType()))
4982         return;
4983       unsigned Opc =
4984           IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
4985       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
4986       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4987       return;
4988     }
4989     // If the FP type needs to be softened, emit a library call using the 'si'
4990     // version. If we left it to default legalization we'd end up with 'di'. If
4991     // the FP type doesn't need to be softened just let generic type
4992     // legalization promote the result type.
4993     RTLIB::Libcall LC;
4994     if (IsSigned)
4995       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4996     else
4997       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4998     MakeLibCallOptions CallOptions;
4999     EVT OpVT = Op0.getValueType();
5000     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5001     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5002     SDValue Result;
5003     std::tie(Result, Chain) =
5004         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5005     Results.push_back(Result);
5006     if (IsStrict)
5007       Results.push_back(Chain);
5008     break;
5009   }
5010   case ISD::READCYCLECOUNTER: {
5011     assert(!Subtarget.is64Bit() &&
5012            "READCYCLECOUNTER only has custom type legalization on riscv32");
5013 
5014     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5015     SDValue RCW =
5016         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5017 
5018     Results.push_back(
5019         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
5020     Results.push_back(RCW.getValue(2));
5021     break;
5022   }
5023   case ISD::MUL: {
5024     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
5025     unsigned XLen = Subtarget.getXLen();
5026     // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
5027     if (Size > XLen) {
5028       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
5029       SDValue LHS = N->getOperand(0);
5030       SDValue RHS = N->getOperand(1);
5031       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
5032 
5033       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
5034       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
5035       // We need exactly one side to be unsigned.
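           // MULHSU computes the high XLen bits of a signed * unsigned
           // product; for any other combination we simply fall back to the
           // default expansion.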
5036       if (LHSIsU == RHSIsU)
5037         return;
5038 
5039       auto MakeMULPair = [&](SDValue S, SDValue U) {
5040         MVT XLenVT = Subtarget.getXLenVT();
5041         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
5042         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
5043         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
5044         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
5045         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
5046       };
5047 
5048       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
5049       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
5050 
5051       // The other operand should be signed, but still prefer MULH when
5052       // possible.
5053       if (RHSIsU && LHSIsS && !RHSIsS)
5054         Results.push_back(MakeMULPair(LHS, RHS));
5055       else if (LHSIsU && RHSIsS && !LHSIsS)
5056         Results.push_back(MakeMULPair(RHS, LHS));
5057 
5058       return;
5059     }
5060     LLVM_FALLTHROUGH;
5061   }
5062   case ISD::ADD:
5063   case ISD::SUB:
5064     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5065            "Unexpected custom legalisation");
5066     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
5067     break;
5068   case ISD::SHL:
5069   case ISD::SRA:
5070   case ISD::SRL:
5071     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5072            "Unexpected custom legalisation");
5073     if (N->getOperand(1).getOpcode() != ISD::Constant) {
5074       Results.push_back(customLegalizeToWOp(N, DAG));
5075       break;
5076     }
5077 
5078     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
5079     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
5080     // shift amount.
5081     if (N->getOpcode() == ISD::SHL) {
5082       SDLoc DL(N);
5083       SDValue NewOp0 =
5084           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5085       SDValue NewOp1 =
5086           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
5087       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
5088       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5089                                    DAG.getValueType(MVT::i32));
5090       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5091     }
5092 
5093     break;
5094   case ISD::ROTL:
5095   case ISD::ROTR:
5096     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5097            "Unexpected custom legalisation");
5098     Results.push_back(customLegalizeToWOp(N, DAG));
5099     break;
5100   case ISD::CTTZ:
5101   case ISD::CTTZ_ZERO_UNDEF:
5102   case ISD::CTLZ:
5103   case ISD::CTLZ_ZERO_UNDEF: {
5104     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5105            "Unexpected custom legalisation");
5106 
5107     SDValue NewOp0 =
5108         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5109     bool IsCTZ =
5110         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
5111     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
5112     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
5113     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5114     return;
5115   }
5116   case ISD::SDIV:
5117   case ISD::UDIV:
5118   case ISD::UREM: {
5119     MVT VT = N->getSimpleValueType(0);
5120     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
5121            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
5122            "Unexpected custom legalisation");
5123     // Don't promote division/remainder by constant since we should expand
5124     // those to a multiply by a magic constant.
5125     // FIXME: What if the expansion is disabled for minsize?
5126     if (N->getOperand(1).getOpcode() == ISD::Constant)
5127       return;
5128 
5129     // If the input is i32, use ANY_EXTEND since the W instructions don't read
5130     // the upper 32 bits. For other types we need to sign or zero extend
5131     // based on the opcode.
5132     unsigned ExtOpc = ISD::ANY_EXTEND;
5133     if (VT != MVT::i32)
5134       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5135                                            : ISD::ZERO_EXTEND;
5136 
5137     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5138     break;
5139   }
5140   case ISD::UADDO:
5141   case ISD::USUBO: {
5142     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5143            "Unexpected custom legalisation");
5144     bool IsAdd = N->getOpcode() == ISD::UADDO;
5145     // Create an ADDW or SUBW.
5146     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5147     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5148     SDValue Res =
5149         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5150     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5151                       DAG.getValueType(MVT::i32));
5152 
5153     // Sign extend the LHS and perform an unsigned compare with the
5154     // ADDW/SUBW result. Since the inputs are sign extended from i32, this is
5155     // equivalent to comparing the lower 32 bits.
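         // For unsigned values, (a + b) <u a in the low 32 bits exactly when
         // the 32-bit addition wrapped, and (a - b) >u a exactly when the
         // subtraction borrowed.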
5156     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5157     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5158                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5159 
5160     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5161     Results.push_back(Overflow);
5162     return;
5163   }
5164   case ISD::UADDSAT:
5165   case ISD::USUBSAT: {
5166     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5167            "Unexpected custom legalisation");
5168     if (Subtarget.hasStdExtZbb()) {
5169       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5170       // sign extend allows overflow of the lower 32 bits to be detected in
5171       // the promoted type.
5172       SDValue LHS =
5173           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5174       SDValue RHS =
5175           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5176       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5177       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5178       return;
5179     }
5180 
5181     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5182     // promotion for UADDO/USUBO.
5183     Results.push_back(expandAddSubSat(N, DAG));
5184     return;
5185   }
5186   case ISD::BITCAST: {
5187     EVT VT = N->getValueType(0);
5188     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5189     SDValue Op0 = N->getOperand(0);
5190     EVT Op0VT = Op0.getValueType();
5191     MVT XLenVT = Subtarget.getXLenVT();
5192     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5193       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5194       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5195     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5196                Subtarget.hasStdExtF()) {
5197       SDValue FPConv =
5198           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5199       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5200     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5201                isTypeLegal(Op0VT)) {
5202       // Custom-legalize bitcasts from fixed-length vector types to illegal
5203       // scalar types in order to improve codegen. Bitcast the vector to a
5204       // one-element vector type whose element type is the same as the result
5205       // type, and extract the first element.
5206       LLVMContext &Context = *DAG.getContext();
5207       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
5208       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5209                                     DAG.getConstant(0, DL, XLenVT)));
5210     }
5211     break;
5212   }
5213   case RISCVISD::GREV:
5214   case RISCVISD::GORC: {
5215     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5216            "Unexpected custom legalisation");
5217     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5218     // This is similar to customLegalizeToWOp: any-extend both the source
5219     // operand and the constant shift amount to i64 before creating the
5220     // W-form node.
5221     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5222     SDValue NewOp0 =
5223         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5224     SDValue NewOp1 =
5225         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5226     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5227     // ReplaceNodeResults requires we maintain the same type for the return
5228     // value.
5229     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5230     break;
5231   }
5232   case RISCVISD::SHFL: {
5233     // There is no SHFLIW instruction, but we can just promote the operation.
5234     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5235            "Unexpected custom legalisation");
5236     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5237     SDValue NewOp0 =
5238         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5239     SDValue NewOp1 =
5240         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5241     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5242     // ReplaceNodeResults requires we maintain the same type for the return
5243     // value.
5244     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5245     break;
5246   }
5247   case ISD::BSWAP:
5248   case ISD::BITREVERSE: {
5249     MVT VT = N->getSimpleValueType(0);
5250     MVT XLenVT = Subtarget.getXLenVT();
5251     assert((VT == MVT::i8 || VT == MVT::i16 ||
5252             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5253            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5254     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5255     unsigned Imm = VT.getSizeInBits() - 1;
5256     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5257     if (N->getOpcode() == ISD::BSWAP)
5258       Imm &= ~0x7U;
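         // e.g. i32 BITREVERSE uses Imm == 31, while i32 BSWAP clears the low
         // 3 bits to get Imm == 24 (a byte reverse).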
5259     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5260     SDValue GREVI =
5261         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5262     // ReplaceNodeResults requires we maintain the same type for the return
5263     // value.
5264     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5265     break;
5266   }
5267   case ISD::FSHL:
5268   case ISD::FSHR: {
5269     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5270            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5271     SDValue NewOp0 =
5272         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5273     SDValue NewOp1 =
5274         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5275     SDValue NewOp2 =
5276         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5277     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5278     // Mask the shift amount to 5 bits.
5279     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5280                          DAG.getConstant(0x1f, DL, MVT::i64));
5281     unsigned Opc =
5282         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5283     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5284     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5285     break;
5286   }
5287   case ISD::EXTRACT_VECTOR_ELT: {
5288     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
5289     // element type is illegal (currently only vXi64 on RV32).
5290     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5291     // transferred to the destination register. We issue two of these, one for
5292     // the lower and one for the upper half of the SEW-bit vector element,
5293     // after sliding the desired element down to the first position.
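         // Concretely: slide the desired element down to index 0, read EltLo
         // with vmv.x.s, shift the first element right by 32 bits with vsrl,
         // read EltHi with a second vmv.x.s, and build the i64 result from the
         // pair.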
5294     SDValue Vec = N->getOperand(0);
5295     SDValue Idx = N->getOperand(1);
5296 
5297     // The vector type hasn't been legalized yet so we can't issue target
5298     // specific nodes if it needs legalization.
5299     // FIXME: We could manually legalize this if it becomes important.
5300     if (!isTypeLegal(Vec.getValueType()))
5301       return;
5302 
5303     MVT VecVT = Vec.getSimpleValueType();
5304 
5305     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5306            VecVT.getVectorElementType() == MVT::i64 &&
5307            "Unexpected EXTRACT_VECTOR_ELT legalization");
5308 
5309     // If this is a fixed vector, we need to convert it to a scalable vector.
5310     MVT ContainerVT = VecVT;
5311     if (VecVT.isFixedLengthVector()) {
5312       ContainerVT = getContainerForFixedLengthVector(VecVT);
5313       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5314     }
5315 
5316     MVT XLenVT = Subtarget.getXLenVT();
5317 
5318     // Use a VL of 1 to avoid processing more elements than we need.
5319     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5320     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5321     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5322 
5323     // Unless the index is known to be 0, we must slide the vector down to get
5324     // the desired element into index 0.
5325     if (!isNullConstant(Idx)) {
5326       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5327                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5328     }
5329 
5330     // Extract the lower XLEN bits of the correct vector element.
5331     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5332 
5333     // To extract the upper XLEN bits of the vector element, shift the first
5334     // element right by 32 bits and re-extract the lower XLEN bits.
5335     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5336                                      DAG.getConstant(32, DL, XLenVT), VL);
5337     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5338                                  ThirtyTwoV, Mask, VL);
5339 
5340     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5341 
5342     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5343     break;
5344   }
5345   case ISD::INTRINSIC_WO_CHAIN: {
5346     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5347     switch (IntNo) {
5348     default:
5349       llvm_unreachable(
5350           "Don't know how to custom type legalize this intrinsic!");
5351     case Intrinsic::riscv_orc_b: {
5352       // Lower to the GORCI encoding for orc.b with the operand extended.
5353       SDValue NewOp =
5354           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5355       // If Zbp is enabled, use GORCIW which will sign extend the result.
5356       unsigned Opc =
5357           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5358       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5359                                 DAG.getConstant(7, DL, MVT::i64));
5360       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5361       return;
5362     }
5363     case Intrinsic::riscv_grev:
5364     case Intrinsic::riscv_gorc: {
5365       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5366              "Unexpected custom legalisation");
5367       SDValue NewOp1 =
5368           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5369       SDValue NewOp2 =
5370           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5371       unsigned Opc =
5372           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5373       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5374       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5375       break;
5376     }
5377     case Intrinsic::riscv_shfl:
5378     case Intrinsic::riscv_unshfl: {
5379       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5380              "Unexpected custom legalisation");
5381       SDValue NewOp1 =
5382           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5383       SDValue NewOp2 =
5384           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5385       unsigned Opc =
5386           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5387       if (isa<ConstantSDNode>(N->getOperand(2))) {
5388         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5389                              DAG.getConstant(0xf, DL, MVT::i64));
5390         Opc =
5391             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5392       }
5393       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5394       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5395       break;
5396     }
5397     case Intrinsic::riscv_bcompress:
5398     case Intrinsic::riscv_bdecompress: {
5399       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5400              "Unexpected custom legalisation");
5401       SDValue NewOp1 =
5402           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5403       SDValue NewOp2 =
5404           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5405       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5406                          ? RISCVISD::BCOMPRESSW
5407                          : RISCVISD::BDECOMPRESSW;
5408       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5409       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5410       break;
5411     }
5412     case Intrinsic::riscv_vmv_x_s: {
5413       EVT VT = N->getValueType(0);
5414       MVT XLenVT = Subtarget.getXLenVT();
5415       if (VT.bitsLT(XLenVT)) {
5416         // Simple case: just extract using vmv.x.s and truncate.
5417         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5418                                       Subtarget.getXLenVT(), N->getOperand(1));
5419         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5420         return;
5421       }
5422 
5423       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5424              "Unexpected custom legalization");
5425 
5426       // We need to do the move in two steps.
5427       SDValue Vec = N->getOperand(1);
5428       MVT VecVT = Vec.getSimpleValueType();
5429 
5430       // First extract the lower XLEN bits of the element.
5431       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5432 
5433       // To extract the upper XLEN bits of the vector element, shift the first
5434       // element right by 32 bits and re-extract the lower XLEN bits.
5435       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5436       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5437       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5438       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5439                                        DAG.getConstant(32, DL, XLenVT), VL);
5440       SDValue LShr32 =
5441           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5442       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5443 
5444       Results.push_back(
5445           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5446       break;
5447     }
5448     }
5449     break;
5450   }
5451   case ISD::VECREDUCE_ADD:
5452   case ISD::VECREDUCE_AND:
5453   case ISD::VECREDUCE_OR:
5454   case ISD::VECREDUCE_XOR:
5455   case ISD::VECREDUCE_SMAX:
5456   case ISD::VECREDUCE_UMAX:
5457   case ISD::VECREDUCE_SMIN:
5458   case ISD::VECREDUCE_UMIN:
5459     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5460       Results.push_back(V);
5461     break;
5462   case ISD::FLT_ROUNDS_: {
5463     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5464     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5465     Results.push_back(Res.getValue(0));
5466     Results.push_back(Res.getValue(1));
5467     break;
5468   }
5469   }
5470 }
5471 
5472 // A structure to hold one of the bit-manipulation patterns below. Together, a
5473 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5474 //   (or (and (shl x, 1), 0xAAAAAAAA),
5475 //       (and (srl x, 1), 0x55555555))
5476 struct RISCVBitmanipPat {
5477   SDValue Op;
5478   unsigned ShAmt;
5479   bool IsSHL;
5480 
5481   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5482     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5483   }
5484 };
5485 
5486 // Matches patterns of the form
5487 //   (and (shl x, C2), (C1 << C2))
5488 //   (and (srl x, C2), C1)
5489 //   (shl (and x, C1), C2)
5490 //   (srl (and x, (C1 << C2)), C2)
5491 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5492 // The expected masks for each shift amount are specified in BitmanipMasks where
5493 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
5494 // The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
5495 // whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
5496 // possible XLen is 64.
5497 static Optional<RISCVBitmanipPat>
5498 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5499   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5500          "Unexpected number of masks");
5501   Optional<uint64_t> Mask;
5502   // Optionally consume a mask around the shift operation.
5503   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5504     Mask = Op.getConstantOperandVal(1);
5505     Op = Op.getOperand(0);
5506   }
5507   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5508     return None;
5509   bool IsSHL = Op.getOpcode() == ISD::SHL;
5510 
5511   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5512     return None;
5513   uint64_t ShAmt = Op.getConstantOperandVal(1);
5514 
5515   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5516   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5517     return None;
5518   // If we don't have enough masks for 64 bits, then we must be trying to
5519   // match SHFL, so we're only allowed to shift 1/4 of the width.
5520   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5521     return None;
5522 
5523   SDValue Src = Op.getOperand(0);
5524 
5525   // The expected mask is shifted left when the AND is found around SHL
5526   // patterns.
5527   //   ((x >> 1) & 0x55555555)
5528   //   ((x << 1) & 0xAAAAAAAA)
5529   bool SHLExpMask = IsSHL;
5530 
5531   if (!Mask) {
5532     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5533     // the mask is all ones: consume that now.
5534     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5535       Mask = Src.getConstantOperandVal(1);
5536       Src = Src.getOperand(0);
5537       // The expected mask is now in fact shifted left for SRL, so reverse the
5538       // decision.
5539       //   ((x & 0xAAAAAAAA) >> 1)
5540       //   ((x & 0x55555555) << 1)
5541       SHLExpMask = !SHLExpMask;
5542     } else {
5543       // Use a default shifted mask of all-ones if there's no AND, truncated
5544       // down to the expected width. This simplifies the logic later on.
5545       Mask = maskTrailingOnes<uint64_t>(Width);
5546       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5547     }
5548   }
5549 
5550   unsigned MaskIdx = Log2_32(ShAmt);
5551   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5552 
5553   if (SHLExpMask)
5554     ExpMask <<= ShAmt;
5555 
5556   if (Mask != ExpMask)
5557     return None;
5558 
5559   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5560 }
5561 
5562 // Matches any of the following bit-manipulation patterns:
5563 //   (and (shl x, 1), (0x55555555 << 1))
5564 //   (and (srl x, 1), 0x55555555)
5565 //   (shl (and x, 0x55555555), 1)
5566 //   (srl (and x, (0x55555555 << 1)), 1)
5567 // where the shift amount and mask may vary thus:
5568 //   [1]  = 0x55555555 / 0xAAAAAAAA
5569 //   [2]  = 0x33333333 / 0xCCCCCCCC
5570 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5571 //   [8]  = 0x00FF00FF / 0xFF00FF00
5572 //   [16] = 0x0000FFFF / 0xFFFF0000
5573 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
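     // For example, for an i32 value, (and (srl x, 4), 0x0F0F0F0F) matches with
     // ShAmt == 4 and IsSHL == false.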
5574 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5575   // These are the unshifted masks which we use to match bit-manipulation
5576   // patterns. They may be shifted left in certain circumstances.
5577   static const uint64_t BitmanipMasks[] = {
5578       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5579       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5580 
5581   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5582 }
5583 
5584 // Match the following pattern as a GREVI(W) operation
5585 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5586 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5587                                const RISCVSubtarget &Subtarget) {
5588   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5589   EVT VT = Op.getValueType();
5590 
5591   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5592     auto LHS = matchGREVIPat(Op.getOperand(0));
5593     auto RHS = matchGREVIPat(Op.getOperand(1));
5594     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5595       SDLoc DL(Op);
5596       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5597                          DAG.getConstant(LHS->ShAmt, DL, VT));
5598     }
5599   }
5600   return SDValue();
5601 }
5602 
5603 // Matches any of the following patterns as a GORCI(W) operation
5604 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5605 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5606 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5607 // Note that with the variant of 3.,
5608 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5609 // the inner pattern will first be matched as GREVI and then the outer
5610 // pattern will be matched to GORC via the first rule above.
5611 // 4.  (or (rotl/rotr x, bitwidth/2), x)
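     // For example, on RV32 (or (rotl x, 16), x) combines to (gorc x, 16) via
     // rule 4.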
5612 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5613                                const RISCVSubtarget &Subtarget) {
5614   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5615   EVT VT = Op.getValueType();
5616 
5617   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5618     SDLoc DL(Op);
5619     SDValue Op0 = Op.getOperand(0);
5620     SDValue Op1 = Op.getOperand(1);
5621 
5622     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5623       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5624           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5625           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5626         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5627       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5628       if ((Reverse.getOpcode() == ISD::ROTL ||
5629            Reverse.getOpcode() == ISD::ROTR) &&
5630           Reverse.getOperand(0) == X &&
5631           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5632         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5633         if (RotAmt == (VT.getSizeInBits() / 2))
5634           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5635                              DAG.getConstant(RotAmt, DL, VT));
5636       }
5637       return SDValue();
5638     };
5639 
5640     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5641     if (SDValue V = MatchOROfReverse(Op0, Op1))
5642       return V;
5643     if (SDValue V = MatchOROfReverse(Op1, Op0))
5644       return V;
5645 
5646     // OR is commutable so canonicalize its OR operand to the left
5647     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5648       std::swap(Op0, Op1);
5649     if (Op0.getOpcode() != ISD::OR)
5650       return SDValue();
5651     SDValue OrOp0 = Op0.getOperand(0);
5652     SDValue OrOp1 = Op0.getOperand(1);
5653     auto LHS = matchGREVIPat(OrOp0);
5654     // OR is commutable so swap the operands and try again: x might have been
5655     // on the left
5656     if (!LHS) {
5657       std::swap(OrOp0, OrOp1);
5658       LHS = matchGREVIPat(OrOp0);
5659     }
5660     auto RHS = matchGREVIPat(Op1);
5661     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5662       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5663                          DAG.getConstant(LHS->ShAmt, DL, VT));
5664     }
5665   }
5666   return SDValue();
5667 }
5668 
5669 // Matches any of the following bit-manipulation patterns:
5670 //   (and (shl x, 1), (0x22222222 << 1))
5671 //   (and (srl x, 1), 0x22222222)
5672 //   (shl (and x, 0x22222222), 1)
5673 //   (srl (and x, (0x22222222 << 1)), 1)
5674 // where the shift amount and mask may vary thus:
5675 //   [1]  = 0x22222222 / 0x44444444
5676 //   [2]  = 0x0C0C0C0C / 0x30303030
5677 //   [4]  = 0x00F000F0 / 0x0F000F00
5678 //   [8]  = 0x0000FF00 / 0x00FF0000
5679 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5680 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5681   // These are the unshifted masks which we use to match bit-manipulation
5682   // patterns. They may be shifted left in certain circumstances.
5683   static const uint64_t BitmanipMasks[] = {
5684       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5685       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5686 
5687   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5688 }
5689 
5690 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
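     // For example, with shift amount 1 on RV32:
     //   (or (or (and (shl x, 1), 0x44444444), (and (srl x, 1), 0x22222222)),
     //       (and x, 0x99999999))
     // combines to (shfl x, 1).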
5691 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5692                                const RISCVSubtarget &Subtarget) {
5693   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5694   EVT VT = Op.getValueType();
5695 
5696   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5697     return SDValue();
5698 
5699   SDValue Op0 = Op.getOperand(0);
5700   SDValue Op1 = Op.getOperand(1);
5701 
5702   // OR is commutable so canonicalize the second OR to the LHS.
5703   if (Op0.getOpcode() != ISD::OR)
5704     std::swap(Op0, Op1);
5705   if (Op0.getOpcode() != ISD::OR)
5706     return SDValue();
5707 
5708   // We found an inner OR, so our operands are the operands of the inner OR
5709   // and the other operand of the outer OR.
5710   SDValue A = Op0.getOperand(0);
5711   SDValue B = Op0.getOperand(1);
5712   SDValue C = Op1;
5713 
5714   auto Match1 = matchSHFLPat(A);
5715   auto Match2 = matchSHFLPat(B);
5716 
5717   // If neither matched, we failed.
5718   if (!Match1 && !Match2)
5719     return SDValue();
5720 
5721   // We had at least one match. If one failed, try the remaining C operand.
5722   if (!Match1) {
5723     std::swap(A, C);
5724     Match1 = matchSHFLPat(A);
5725     if (!Match1)
5726       return SDValue();
5727   } else if (!Match2) {
5728     std::swap(B, C);
5729     Match2 = matchSHFLPat(B);
5730     if (!Match2)
5731       return SDValue();
5732   }
5733   assert(Match1 && Match2);
5734 
5735   // Make sure our matches pair up.
5736   if (!Match1->formsPairWith(*Match2))
5737     return SDValue();
5738 
5739   // All that remains is to make sure C is an AND of the same input that masks
5740   // out the bits being shuffled.
5741   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5742       C.getOperand(0) != Match1->Op)
5743     return SDValue();
5744 
5745   uint64_t Mask = C.getConstantOperandVal(1);
5746 
5747   static const uint64_t BitmanipMasks[] = {
5748       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5749       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5750   };
5751 
5752   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5753   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5754   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5755 
5756   if (Mask != ExpMask)
5757     return SDValue();
5758 
5759   SDLoc DL(Op);
5760   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5761                      DAG.getConstant(Match1->ShAmt, DL, VT));
5762 }
5763 
5764 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5765 // non-zero, and to x when C1^C2 is zero, since any repeated GREVI stage
5766 // undoes itself. Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2); a
5767 // repeated GORCI stage does not undo itself, but it is redundant.
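     // For example, (grevi (grevi x, 1), 1) -> x, and
     // (gorci (gorci x, 1), 2) -> (gorci x, 3).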
5768 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5769   SDValue Src = N->getOperand(0);
5770 
5771   if (Src.getOpcode() != N->getOpcode())
5772     return SDValue();
5773 
5774   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5775       !isa<ConstantSDNode>(Src.getOperand(1)))
5776     return SDValue();
5777 
5778   unsigned ShAmt1 = N->getConstantOperandVal(1);
5779   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5780   Src = Src.getOperand(0);
5781 
5782   unsigned CombinedShAmt;
5783   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5784     CombinedShAmt = ShAmt1 | ShAmt2;
5785   else
5786     CombinedShAmt = ShAmt1 ^ ShAmt2;
5787 
5788   if (CombinedShAmt == 0)
5789     return Src;
5790 
5791   SDLoc DL(N);
5792   return DAG.getNode(
5793       N->getOpcode(), DL, N->getValueType(0), Src,
5794       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5795 }
5796 
5797 // Combine a constant select operand into its use:
5798 //
5799 // (and (select cond, -1, c), x)
5800 //   -> (select cond, x, (and x, c))  [AllOnes=1]
5801 // (or  (select cond, 0, c), x)
5802 //   -> (select cond, x, (or x, c))  [AllOnes=0]
5803 // (xor (select cond, 0, c), x)
5804 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
5805 // (add (select cond, 0, c), x)
5806 //   -> (select cond, x, (add x, c))  [AllOnes=0]
5807 // (sub x, (select cond, 0, c))
5808 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
5809 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5810                                    SelectionDAG &DAG, bool AllOnes) {
5811   EVT VT = N->getValueType(0);
5812 
5813   // Skip vectors.
5814   if (VT.isVector())
5815     return SDValue();
5816 
5817   if ((Slct.getOpcode() != ISD::SELECT &&
5818        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
5819       !Slct.hasOneUse())
5820     return SDValue();
5821 
5822   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5823     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5824   };
5825 
5826   bool SwapSelectOps;
5827   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
5828   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
5829   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
5830   SDValue NonConstantVal;
5831   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5832     SwapSelectOps = false;
5833     NonConstantVal = FalseVal;
5834   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5835     SwapSelectOps = true;
5836     NonConstantVal = TrueVal;
5837   } else
5838     return SDValue();
5839 
5840   // Slct is now known to be the desired identity constant when CC is true.
5841   TrueVal = OtherOp;
5842   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5843   // Unless SwapSelectOps says the condition should be false.
5844   if (SwapSelectOps)
5845     std::swap(TrueVal, FalseVal);
5846 
5847   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
5848     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5849                        {Slct.getOperand(0), Slct.getOperand(1),
5850                         Slct.getOperand(2), TrueVal, FalseVal});
5851 
5852   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
5853                      {Slct.getOperand(0), TrueVal, FalseVal});
5854 }
5855 
5856 // Attempt combineSelectAndUse on each operand of a commutative operator N.
5857 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5858                                               bool AllOnes) {
5859   SDValue N0 = N->getOperand(0);
5860   SDValue N1 = N->getOperand(1);
5861   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
5862     return Result;
5863   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
5864     return Result;
5865   return SDValue();
5866 }
5867 
5868 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG) {
5869   // fold (add (select lhs, rhs, cc, 0, y), x) ->
5870   //      (select lhs, rhs, cc, x, (add x, y))
5871   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
5872 }
5873 
5874 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
5875   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
5876   //      (select lhs, rhs, cc, x, (sub x, y))
5877   SDValue N0 = N->getOperand(0);
5878   SDValue N1 = N->getOperand(1);
5879   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
5880 }
5881 
5882 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
5883   // fold (and (select lhs, rhs, cc, -1, y), x) ->
5884   //      (select lhs, rhs, cc, x, (and x, y))
5885   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
5886 }
5887 
5888 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
5889                                 const RISCVSubtarget &Subtarget) {
5890   if (Subtarget.hasStdExtZbp()) {
5891     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5892       return GREV;
5893     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5894       return GORC;
5895     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5896       return SHFL;
5897   }
5898 
5899   // fold (or (select cond, 0, y), x) ->
5900   //      (select cond, x, (or x, y))
5901   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
5902 }
5903 
5904 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
5905   // fold (xor (select cond, 0, y), x) ->
5906   //      (select cond, x, (xor x, y))
5907   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
5908 }
5909 
5910 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5911 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5912 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5913 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5914 // ADDW/SUBW/MULW.
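     // For example, if (add i32 a, b) is any_extended to feed a CopyToReg and
     // also feeds an i32 setcc, we use sign_extend instead: the add can then
     // select to ADDW, and the setcc operands are already sign extended.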
5915 static SDValue performANY_EXTENDCombine(SDNode *N,
5916                                         TargetLowering::DAGCombinerInfo &DCI,
5917                                         const RISCVSubtarget &Subtarget) {
5918   if (!Subtarget.is64Bit())
5919     return SDValue();
5920 
5921   SelectionDAG &DAG = DCI.DAG;
5922 
5923   SDValue Src = N->getOperand(0);
5924   EVT VT = N->getValueType(0);
5925   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5926     return SDValue();
5927 
5928   // The opcode must be one that can implicitly sign_extend.
5929   // FIXME: Additional opcodes.
5930   switch (Src.getOpcode()) {
5931   default:
5932     return SDValue();
5933   case ISD::MUL:
5934     if (!Subtarget.hasStdExtM())
5935       return SDValue();
5936     LLVM_FALLTHROUGH;
5937   case ISD::ADD:
5938   case ISD::SUB:
5939     break;
5940   }
5941 
5942   // Only handle cases where the result is used by a CopyToReg. That likely
5943   // means the value is a liveout of the basic block. This helps prevent
5944   // infinite combine loops like PR51206.
5945   if (none_of(N->uses(),
5946               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
5947     return SDValue();
5948 
5949   SmallVector<SDNode *, 4> SetCCs;
5950   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
5951                             UE = Src.getNode()->use_end();
5952        UI != UE; ++UI) {
5953     SDNode *User = *UI;
5954     if (User == N)
5955       continue;
5956     if (UI.getUse().getResNo() != Src.getResNo())
5957       continue;
5958     // All i32 setccs are legalized by sign extending operands.
5959     if (User->getOpcode() == ISD::SETCC) {
5960       SetCCs.push_back(User);
5961       continue;
5962     }
5963     // We don't know if we can extend this user.
5964     break;
5965   }
5966 
5967   // If we don't have any SetCCs, this isn't worthwhile.
5968   if (SetCCs.empty())
5969     return SDValue();
5970 
5971   SDLoc DL(N);
5972   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
5973   DCI.CombineTo(N, SExt);
5974 
5975   // Promote all the setccs.
5976   for (SDNode *SetCC : SetCCs) {
5977     SmallVector<SDValue, 4> Ops;
5978 
5979     for (unsigned j = 0; j != 2; ++j) {
5980       SDValue SOp = SetCC->getOperand(j);
5981       if (SOp == Src)
5982         Ops.push_back(SExt);
5983       else
5984         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
5985     }
5986 
5987     Ops.push_back(SetCC->getOperand(2));
5988     DCI.CombineTo(SetCC,
5989                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5990   }
5991   return SDValue(N, 0);
5992 }
5993 
5994 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5995                                                DAGCombinerInfo &DCI) const {
5996   SelectionDAG &DAG = DCI.DAG;
5997 
5998   // Helper to call SimplifyDemandedBits on an operand of N where only some low
5999   // bits are demanded. N will be added to the Worklist if it was not deleted.
6000   // Caller should return SDValue(N, 0) if this returns true.
6001   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
6002     SDValue Op = N->getOperand(OpNo);
6003     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
6004     if (!SimplifyDemandedBits(Op, Mask, DCI))
6005       return false;
6006 
6007     if (N->getOpcode() != ISD::DELETED_NODE)
6008       DCI.AddToWorklist(N);
6009     return true;
6010   };
6011 
6012   switch (N->getOpcode()) {
6013   default:
6014     break;
6015   case RISCVISD::SplitF64: {
6016     SDValue Op0 = N->getOperand(0);
6017     // If the input to SplitF64 is just BuildPairF64 then the operation is
6018     // redundant. Instead, use BuildPairF64's operands directly.
6019     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
6020       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
6021 
6022     SDLoc DL(N);
6023 
6024     // It's cheaper to materialise two 32-bit integers than to load a double
6025     // from the constant pool and transfer it to integer registers through the
6026     // stack.
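         // For example, f64 1.0 (0x3FF0000000000000) becomes the pair
         // Lo = 0x00000000 and Hi = 0x3FF00000.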
6027     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
6028       APInt V = C->getValueAPF().bitcastToAPInt();
6029       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
6030       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
6031       return DCI.CombineTo(N, Lo, Hi);
6032     }
6033 
6034     // This is a target-specific version of a DAGCombine performed in
6035     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6036     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6037     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
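         // Here that means flipping (for FNEG) or clearing (for FABS) bit 31
         // of the high half of the split pair.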
6038     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6039         !Op0.getNode()->hasOneUse())
6040       break;
6041     SDValue NewSplitF64 =
6042         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
6043                     Op0.getOperand(0));
6044     SDValue Lo = NewSplitF64.getValue(0);
6045     SDValue Hi = NewSplitF64.getValue(1);
6046     APInt SignBit = APInt::getSignMask(32);
6047     if (Op0.getOpcode() == ISD::FNEG) {
6048       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
6049                                   DAG.getConstant(SignBit, DL, MVT::i32));
6050       return DCI.CombineTo(N, Lo, NewHi);
6051     }
6052     assert(Op0.getOpcode() == ISD::FABS);
6053     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
6054                                 DAG.getConstant(~SignBit, DL, MVT::i32));
6055     return DCI.CombineTo(N, Lo, NewHi);
6056   }
6057   case RISCVISD::SLLW:
6058   case RISCVISD::SRAW:
6059   case RISCVISD::SRLW:
6060   case RISCVISD::ROLW:
6061   case RISCVISD::RORW: {
6062     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6063     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6064         SimplifyDemandedLowBitsHelper(1, 5))
6065       return SDValue(N, 0);
6066     break;
6067   }
6068   case RISCVISD::CLZW:
6069   case RISCVISD::CTZW: {
6070     // Only the lower 32 bits of the first operand are read.
6071     if (SimplifyDemandedLowBitsHelper(0, 32))
6072       return SDValue(N, 0);
6073     break;
6074   }
6075   case RISCVISD::FSL:
6076   case RISCVISD::FSR: {
6077     // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
6078     unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
6079     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6080     if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
6081       return SDValue(N, 0);
6082     break;
6083   }
6084   case RISCVISD::FSLW:
6085   case RISCVISD::FSRW: {
6086     // Only the lower 32 bits of the values and the lower 6 bits of the shift
6087     // amount are read.
6088     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6089         SimplifyDemandedLowBitsHelper(1, 32) ||
6090         SimplifyDemandedLowBitsHelper(2, 6))
6091       return SDValue(N, 0);
6092     break;
6093   }
6094   case RISCVISD::GREV:
6095   case RISCVISD::GORC: {
6096     // Only the lower log2(Bitwidth) bits of the shift amount are read.
6097     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6098     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6099     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
6100       return SDValue(N, 0);
6101 
6102     return combineGREVI_GORCI(N, DCI.DAG);
6103   }
6104   case RISCVISD::GREVW:
6105   case RISCVISD::GORCW: {
6106     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6107     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6108         SimplifyDemandedLowBitsHelper(1, 5))
6109       return SDValue(N, 0);
6110 
6111     return combineGREVI_GORCI(N, DCI.DAG);
6112   }
6113   case RISCVISD::SHFL:
6114   case RISCVISD::UNSHFL: {
6115     // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
6116     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6117     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6118     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
6119       return SDValue(N, 0);
6120 
6121     break;
6122   }
6123   case RISCVISD::SHFLW:
6124   case RISCVISD::UNSHFLW: {
6125     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6130     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6131         SimplifyDemandedLowBitsHelper(1, 4))
6132       return SDValue(N, 0);
6133 
6134     break;
6135   }
6136   case RISCVISD::BCOMPRESSW:
6137   case RISCVISD::BDECOMPRESSW: {
6138     // Only the lower 32 bits of LHS and RHS are read.
6139     if (SimplifyDemandedLowBitsHelper(0, 32) ||
6140         SimplifyDemandedLowBitsHelper(1, 32))
6141       return SDValue(N, 0);
6142 
6143     break;
6144   }
6145   case RISCVISD::FMV_X_ANYEXTH:
6146   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6147     SDLoc DL(N);
6148     SDValue Op0 = N->getOperand(0);
6149     MVT VT = N->getSimpleValueType(0);
6150     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6151     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
6152     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
6153     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
6154          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
6155         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
6156          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
6157       assert(Op0.getOperand(0).getValueType() == VT &&
6158              "Unexpected value type!");
6159       return Op0.getOperand(0);
6160     }
6161 
6162     // This is a target-specific version of a DAGCombine performed in
6163     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6164     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6165     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6166     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6167         !Op0.getNode()->hasOneUse())
6168       break;
6169     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
6170     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
6171     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
6172     if (Op0.getOpcode() == ISD::FNEG)
6173       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
6174                          DAG.getConstant(SignBit, DL, VT));
6175 
6176     assert(Op0.getOpcode() == ISD::FABS);
6177     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
6178                        DAG.getConstant(~SignBit, DL, VT));
6179   }
6180   case ISD::ADD:
6181     return performADDCombine(N, DAG);
6182   case ISD::SUB:
6183     return performSUBCombine(N, DAG);
6184   case ISD::AND:
6185     return performANDCombine(N, DAG);
6186   case ISD::OR:
6187     return performORCombine(N, DAG, Subtarget);
6188   case ISD::XOR:
6189     return performXORCombine(N, DAG);
6190   case ISD::ANY_EXTEND:
6191     return performANY_EXTENDCombine(N, DCI, Subtarget);
6192   case ISD::ZERO_EXTEND:
6193     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
6194     // type legalization. This is safe because fp_to_uint produces poison if
6195     // it overflows.
6196     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
6197         N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
6198         isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
6199       return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
6200                          N->getOperand(0).getOperand(0));
6201     return SDValue();
6202   case RISCVISD::SELECT_CC: {
6204     SDValue LHS = N->getOperand(0);
6205     SDValue RHS = N->getOperand(1);
6206     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
6207     if (!ISD::isIntEqualitySetCC(CCVal))
6208       break;
6209 
6210     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6211     //      (select_cc X, Y, lt, trueV, falseV)
6212     // Sometimes the setcc is introduced after select_cc has been formed.
6213     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6214         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6215       // If we're looking for eq 0 instead of ne 0, we need to invert the
6216       // condition.
6217       bool Invert = CCVal == ISD::SETEQ;
6218       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6219       if (Invert)
6220         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6221 
6222       SDLoc DL(N);
6223       RHS = LHS.getOperand(1);
6224       LHS = LHS.getOperand(0);
6225       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6226 
6227       SDValue TargetCC = DAG.getCondCode(CCVal);
6228       return DAG.getNode(
6229           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6230           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6231     }
6232 
6233     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6234     //      (select_cc X, Y, eq/ne, trueV, falseV)
6235     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6236       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6237                          {LHS.getOperand(0), LHS.getOperand(1),
6238                           N->getOperand(2), N->getOperand(3),
6239                           N->getOperand(4)});
6240     // (select_cc X, 1, setne, trueV, falseV) ->
6241     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6242     // This can occur when legalizing some floating point comparisons.
6243     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6244     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6245       SDLoc DL(N);
6246       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6247       SDValue TargetCC = DAG.getCondCode(CCVal);
6248       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6249       return DAG.getNode(
6250           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6251           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6252     }
6253 
6254     break;
6255   }
6256   case RISCVISD::BR_CC: {
6257     SDValue LHS = N->getOperand(1);
6258     SDValue RHS = N->getOperand(2);
6259     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6260     if (!ISD::isIntEqualitySetCC(CCVal))
6261       break;
6262 
6263     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6264     //      (br_cc X, Y, lt, dest)
6265     // Sometimes the setcc is introduced after br_cc has been formed.
6266     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6267         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6268       // If we're looking for eq 0 instead of ne 0, we need to invert the
6269       // condition.
6270       bool Invert = CCVal == ISD::SETEQ;
6271       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6272       if (Invert)
6273         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6274 
6275       SDLoc DL(N);
6276       RHS = LHS.getOperand(1);
6277       LHS = LHS.getOperand(0);
6278       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6279 
6280       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6281                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6282                          N->getOperand(4));
6283     }
6284 
    // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
6287     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6288       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6289                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6290                          N->getOperand(3), N->getOperand(4));
6291 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6294     // This can occur when legalizing some floating point comparisons.
6295     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6296     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6297       SDLoc DL(N);
6298       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6299       SDValue TargetCC = DAG.getCondCode(CCVal);
6300       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6301       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6302                          N->getOperand(0), LHS, RHS, TargetCC,
6303                          N->getOperand(4));
6304     }
6305     break;
6306   }
6307   case ISD::FCOPYSIGN: {
6308     EVT VT = N->getValueType(0);
6309     if (!VT.isVector())
6310       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up past the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
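    // For example, (fcopysign X, (fp_extend (fneg Y))) is rewritten to
    // (fcopysign X, (fneg (fp_extend Y))), which can later be selected as a
    // negating sign-injection (vfsgnjn).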
6315     SDValue In2 = N->getOperand(1);
6316     // Avoid cases where the extend/round has multiple uses, as duplicating
6317     // those is typically more expensive than removing a fneg.
6318     if (!In2.hasOneUse())
6319       break;
6320     if (In2.getOpcode() != ISD::FP_EXTEND &&
6321         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6322       break;
6323     In2 = In2.getOperand(0);
6324     if (In2.getOpcode() != ISD::FNEG)
6325       break;
6326     SDLoc DL(N);
6327     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6328     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6329                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6330   }
6331   case ISD::MGATHER:
6332   case ISD::MSCATTER: {
6333     if (!DCI.isBeforeLegalize())
6334       break;
6335     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
6336     SDValue Index = MGSN->getIndex();
6337     EVT IndexVT = Index.getValueType();
6338     MVT XLenVT = Subtarget.getXLenVT();
6339     // RISCV indexed loads only support the "unsigned unscaled" addressing
6340     // mode, so anything else must be manually legalized.
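    // For example, a gather with signed i16 indices scaled by 4 is legalized
    // below by sign-extending the indices to XLenVT, shifting them left by
    // log2(4) = 2, and emitting an unsigned unscaled gather.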
6341     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
6342                                 (MGSN->isIndexSigned() &&
6343                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
6344     if (!NeedsIdxLegalization)
6345       break;
6346 
6347     SDLoc DL(N);
6348 
6349     // Any index legalization should first promote to XLenVT, so we don't lose
6350     // bits when scaling. This may create an illegal index type so we let
6351     // LLVM's legalization take care of the splitting.
6352     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6353       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6354       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
6355                                                 : ISD::ZERO_EXTEND,
6356                           DL, IndexVT, Index);
6357     }
6358 
6359     unsigned Scale = N->getConstantOperandVal(5);
6360     if (MGSN->isIndexScaled() && Scale != 1) {
6361       // Manually scale the indices by the element size.
6362       // TODO: Sanitize the scale operand here?
      assert(isPowerOf2_32(Scale) && "Expecting a power-of-two scale");
6364       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6365       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6366     }
6367 
6368     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6369     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6370       return DAG.getMaskedGather(
6371           N->getVTList(), MGSN->getMemoryVT(), DL,
6372           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6373            MGSN->getBasePtr(), Index, MGN->getScale()},
6374           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6375     }
6376     const auto *MSN = cast<MaskedScatterSDNode>(N);
6377     return DAG.getMaskedScatter(
6378         N->getVTList(), MGSN->getMemoryVT(), DL,
6379         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6380          Index, MGSN->getScale()},
6381         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6382   }
6383   case RISCVISD::SRA_VL:
6384   case RISCVISD::SRL_VL:
6385   case RISCVISD::SHL_VL: {
6386     SDValue ShAmt = N->getOperand(1);
6387     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6388       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6389       SDLoc DL(N);
6390       SDValue VL = N->getOperand(3);
6391       EVT VT = N->getValueType(0);
6392       ShAmt =
6393           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6394       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6395                          N->getOperand(2), N->getOperand(3));
6396     }
6397     break;
6398   }
6399   case ISD::SRA:
6400   case ISD::SRL:
6401   case ISD::SHL: {
6402     SDValue ShAmt = N->getOperand(1);
6403     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6404       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6405       SDLoc DL(N);
6406       EVT VT = N->getValueType(0);
6407       ShAmt =
6408           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6409       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6410     }
6411     break;
6412   }
6413   case RISCVISD::MUL_VL: {
6414     // Try to form VWMUL or VWMULU.
6415     // FIXME: Look for splat of extended scalar as well.
6416     // FIXME: Support VWMULSU.
6417     SDValue Op0 = N->getOperand(0);
6418     SDValue Op1 = N->getOperand(1);
6419     bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6420     bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6421     if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
6422       return SDValue();
6423 
6424     // Make sure the extends have a single use.
6425     if (!Op0.hasOneUse() || !Op1.hasOneUse())
6426       return SDValue();
6427 
6428     SDValue Mask = N->getOperand(2);
6429     SDValue VL = N->getOperand(3);
6430     if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
6431         Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
6432       return SDValue();
6433 
6434     Op0 = Op0.getOperand(0);
6435     Op1 = Op1.getOperand(0);
6436 
6437     MVT VT = N->getSimpleValueType(0);
6438     MVT NarrowVT =
6439         MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
6440                          VT.getVectorElementCount());
6441 
6442     SDLoc DL(N);
6443 
6444     // Re-introduce narrower extends if needed.
6445     unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6446     if (Op0.getValueType() != NarrowVT)
6447       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6448     if (Op1.getValueType() != NarrowVT)
6449       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6450 
6451     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6452     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6453   }
6454   }
6455 
6456   return SDValue();
6457 }
6458 
6459 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6460     const SDNode *N, CombineLevel Level) const {
6461   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6462   // materialised in fewer instructions than `(OP _, c1)`:
6463   //
6464   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6465   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
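  //
  // For example, with c1 = 0x7ff and c2 = 4, c1 fits in an ADDI immediate but
  // c1 << c2 = 0x7ff0 does not, so the fold is rejected; with c1 = 1 and
  // c2 = 4, both constants fit and the fold proceeds.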
6466   SDValue N0 = N->getOperand(0);
6467   EVT Ty = N0.getValueType();
6468   if (Ty.isScalarInteger() &&
6469       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6470     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6471     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6472     if (C1 && C2) {
6473       const APInt &C1Int = C1->getAPIntValue();
6474       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6475 
6476       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6477       // and the combine should happen, to potentially allow further combines
6478       // later.
6479       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6480           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6481         return true;
6482 
6483       // We can materialise `c1` in an add immediate, so it's "free", and the
6484       // combine should be prevented.
6485       if (C1Int.getMinSignedBits() <= 64 &&
6486           isLegalAddImmediate(C1Int.getSExtValue()))
6487         return false;
6488 
6489       // Neither constant will fit into an immediate, so find materialisation
6490       // costs.
6491       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6492                                               Subtarget.getFeatureBits(),
6493                                               /*CompressionCost*/true);
6494       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6495           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
6496           /*CompressionCost*/true);
6497 
6498       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6499       // combine should be prevented.
6500       if (C1Cost < ShiftedC1Cost)
6501         return false;
6502     }
6503   }
6504   return true;
6505 }
6506 
6507 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6508     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6509     TargetLoweringOpt &TLO) const {
6510   // Delay this optimization as late as possible.
6511   if (!TLO.LegalOps)
6512     return false;
6513 
6514   EVT VT = Op.getValueType();
6515   if (VT.isVector())
6516     return false;
6517 
6518   // Only handle AND for now.
6519   if (Op.getOpcode() != ISD::AND)
6520     return false;
6521 
6522   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6523   if (!C)
6524     return false;
6525 
6526   const APInt &Mask = C->getAPIntValue();
6527 
6528   // Clear all non-demanded bits initially.
6529   APInt ShrunkMask = Mask & DemandedBits;
6530 
6531   // Try to make a smaller immediate by setting undemanded bits.
6532 
6533   APInt ExpandedMask = Mask | ~DemandedBits;
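  // Any mask that contains ShrunkMask and is itself contained in ExpandedMask
  // gives the same result on the demanded bits, so we are free to pick
  // whichever such mask is cheapest to materialise.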
6534 
6535   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6536     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6537   };
6538   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6539     if (NewMask == Mask)
6540       return true;
6541     SDLoc DL(Op);
6542     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6543     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6544     return TLO.CombineTo(Op, NewOp);
6545   };
6546 
6547   // If the shrunk mask fits in sign extended 12 bits, let the target
6548   // independent code apply it.
6549   if (ShrunkMask.isSignedIntN(12))
6550     return false;
6551 
6552   // Preserve (and X, 0xffff) when zext.h is supported.
6553   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6554     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6555     if (IsLegalMask(NewMask))
6556       return UseMask(NewMask);
6557   }
6558 
6559   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6560   if (VT == MVT::i64) {
6561     APInt NewMask = APInt(64, 0xffffffff);
6562     if (IsLegalMask(NewMask))
6563       return UseMask(NewMask);
6564   }
6565 
6566   // For the remaining optimizations, we need to be able to make a negative
6567   // number through a combination of mask and undemanded bits.
6568   if (!ExpandedMask.isNegative())
6569     return false;
6570 
  // Compute the fewest number of bits needed to represent the negative number.
6572   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6573 
6574   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6575   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
6576   APInt NewMask = ShrunkMask;
6577   if (MinSignedBits <= 12)
6578     NewMask.setBitsFrom(11);
6579   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6580     NewMask.setBitsFrom(31);
6581   else
6582     return false;
6583 
6584   // Sanity check that our new mask is a subset of the demanded mask.
6585   assert(IsLegalMask(NewMask));
6586   return UseMask(NewMask);
6587 }
6588 
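// Apply the generalized bit-reverse (GREV) permutation selected by ShAmt to a
// constant. Each set bit of ShAmt swaps adjacent blocks of the corresponding
// size; for example, ShAmt == 7 reverses the bits within each byte, while
// ShAmt == 24 byte-swaps a 32-bit value.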
6589 static void computeGREV(APInt &Src, unsigned ShAmt) {
6590   ShAmt &= Src.getBitWidth() - 1;
6591   uint64_t x = Src.getZExtValue();
6592   if (ShAmt & 1)
6593     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6594   if (ShAmt & 2)
6595     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6596   if (ShAmt & 4)
6597     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6598   if (ShAmt & 8)
6599     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6600   if (ShAmt & 16)
6601     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6602   if (ShAmt & 32)
6603     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6604   Src = x;
6605 }
6606 
6607 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6608                                                         KnownBits &Known,
6609                                                         const APInt &DemandedElts,
6610                                                         const SelectionDAG &DAG,
6611                                                         unsigned Depth) const {
6612   unsigned BitWidth = Known.getBitWidth();
6613   unsigned Opc = Op.getOpcode();
6614   assert((Opc >= ISD::BUILTIN_OP_END ||
6615           Opc == ISD::INTRINSIC_WO_CHAIN ||
6616           Opc == ISD::INTRINSIC_W_CHAIN ||
6617           Opc == ISD::INTRINSIC_VOID) &&
6618          "Should use MaskedValueIsZero if you don't know whether Op"
6619          " is a target node!");
6620 
6621   Known.resetAll();
6622   switch (Opc) {
6623   default: break;
6624   case RISCVISD::SELECT_CC: {
6625     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6626     // If we don't know any bits, early out.
6627     if (Known.isUnknown())
6628       break;
6629     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6630 
6631     // Only known if known in both the LHS and RHS.
6632     Known = KnownBits::commonBits(Known, Known2);
6633     break;
6634   }
6635   case RISCVISD::REMUW: {
6636     KnownBits Known2;
6637     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6638     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6639     // We only care about the lower 32 bits.
6640     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6641     // Restore the original width by sign extending.
6642     Known = Known.sext(BitWidth);
6643     break;
6644   }
6645   case RISCVISD::DIVUW: {
6646     KnownBits Known2;
6647     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6648     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6649     // We only care about the lower 32 bits.
6650     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6651     // Restore the original width by sign extending.
6652     Known = Known.sext(BitWidth);
6653     break;
6654   }
6655   case RISCVISD::CTZW: {
6656     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6657     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6658     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6659     Known.Zero.setBitsFrom(LowBits);
6660     break;
6661   }
6662   case RISCVISD::CLZW: {
6663     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6664     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6665     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6666     Known.Zero.setBitsFrom(LowBits);
6667     break;
6668   }
6669   case RISCVISD::GREV:
6670   case RISCVISD::GREVW: {
6671     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6672       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6673       if (Opc == RISCVISD::GREVW)
6674         Known = Known.trunc(32);
6675       unsigned ShAmt = C->getZExtValue();
6676       computeGREV(Known.Zero, ShAmt);
6677       computeGREV(Known.One, ShAmt);
6678       if (Opc == RISCVISD::GREVW)
6679         Known = Known.sext(BitWidth);
6680     }
6681     break;
6682   }
6683   case RISCVISD::READ_VLENB:
    // We assume VLENB is a power of two of at least 16 bytes, so the low 4
    // bits are known to be zero.
6685     Known.Zero.setLowBits(4);
6686     // We assume VLENB is no more than 65536 / 8 bytes.
6687     Known.Zero.setBitsFrom(14);
6688     break;
6689   case ISD::INTRINSIC_W_CHAIN: {
6690     unsigned IntNo = Op.getConstantOperandVal(1);
6691     switch (IntNo) {
6692     default:
6693       // We can't do anything for most intrinsics.
6694       break;
6695     case Intrinsic::riscv_vsetvli:
6696     case Intrinsic::riscv_vsetvlimax:
6697       // Assume that VL output is positive and would fit in an int32_t.
6698       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6699       if (BitWidth >= 32)
6700         Known.Zero.setBitsFrom(31);
6701       break;
6702     }
6703     break;
6704   }
6705   }
6706 }
6707 
6708 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6709     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6710     unsigned Depth) const {
6711   switch (Op.getOpcode()) {
6712   default:
6713     break;
6714   case RISCVISD::SELECT_CC: {
    unsigned Tmp =
        DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    unsigned Tmp2 =
        DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
6718     return std::min(Tmp, Tmp2);
6719   }
6720   case RISCVISD::SLLW:
6721   case RISCVISD::SRAW:
6722   case RISCVISD::SRLW:
6723   case RISCVISD::DIVW:
6724   case RISCVISD::DIVUW:
6725   case RISCVISD::REMUW:
6726   case RISCVISD::ROLW:
6727   case RISCVISD::RORW:
6728   case RISCVISD::GREVW:
6729   case RISCVISD::GORCW:
6730   case RISCVISD::FSLW:
6731   case RISCVISD::FSRW:
6732   case RISCVISD::SHFLW:
6733   case RISCVISD::UNSHFLW:
6734   case RISCVISD::BCOMPRESSW:
6735   case RISCVISD::BDECOMPRESSW:
6736   case RISCVISD::FCVT_W_RTZ_RV64:
6737   case RISCVISD::FCVT_WU_RTZ_RV64:
6738     // TODO: As the result is sign-extended, this is conservatively correct. A
6739     // more precise answer could be calculated for SRAW depending on known
6740     // bits in the shift amount.
6741     return 33;
6742   case RISCVISD::SHFL:
6743   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
6745     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6746     // will stay within the upper 32 bits. If there were more than 32 sign bits
6747     // before there will be at least 33 sign bits after.
6748     if (Op.getValueType() == MVT::i64 &&
6749         isa<ConstantSDNode>(Op.getOperand(1)) &&
6750         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6751       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6752       if (Tmp > 32)
6753         return 33;
6754     }
6755     break;
6756   }
6757   case RISCVISD::VMV_X_S:
6758     // The number of sign bits of the scalar result is computed by obtaining the
6759     // element type of the input vector operand, subtracting its width from the
6760     // XLEN, and then adding one (sign bit within the element type). If the
6761     // element type is wider than XLen, the least-significant XLEN bits are
6762     // taken.
6763     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6764       return 1;
6765     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6766   }
6767 
6768   return 1;
6769 }
6770 
6771 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6772                                                   MachineBasicBlock *BB) {
6773   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6774 
6775   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6776   // Should the count have wrapped while it was being read, we need to try
6777   // again.
6778   // ...
6779   // read:
6780   // rdcycleh x3 # load high word of cycle
6781   // rdcycle  x2 # load low word of cycle
6782   // rdcycleh x4 # load high word of cycle
6783   // bne x3, x4, read # check if high word reads match, otherwise try again
6784   // ...
6785 
6786   MachineFunction &MF = *BB->getParent();
6787   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6788   MachineFunction::iterator It = ++BB->getIterator();
6789 
6790   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6791   MF.insert(It, LoopMBB);
6792 
6793   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6794   MF.insert(It, DoneMBB);
6795 
6796   // Transfer the remainder of BB and its successor edges to DoneMBB.
6797   DoneMBB->splice(DoneMBB->begin(), BB,
6798                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6799   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6800 
6801   BB->addSuccessor(LoopMBB);
6802 
6803   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6804   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6805   Register LoReg = MI.getOperand(0).getReg();
6806   Register HiReg = MI.getOperand(1).getReg();
6807   DebugLoc DL = MI.getDebugLoc();
6808 
6809   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6810   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6811       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6812       .addReg(RISCV::X0);
6813   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6814       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6815       .addReg(RISCV::X0);
6816   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6817       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6818       .addReg(RISCV::X0);
6819 
6820   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6821       .addReg(HiReg)
6822       .addReg(ReadAgainReg)
6823       .addMBB(LoopMBB);
6824 
6825   LoopMBB->addSuccessor(LoopMBB);
6826   LoopMBB->addSuccessor(DoneMBB);
6827 
6828   MI.eraseFromParent();
6829 
6830   return DoneMBB;
6831 }
6832 
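// Lower SplitF64Pseudo on RV32D by spilling the f64 source register to a
// stack slot and reloading its two 32-bit halves into GPRs with a pair of LW
// instructions, as there is no direct FPR64-to-GPR-pair copy on RV32.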
6833 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6834                                              MachineBasicBlock *BB) {
6835   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6836 
6837   MachineFunction &MF = *BB->getParent();
6838   DebugLoc DL = MI.getDebugLoc();
6839   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6840   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6841   Register LoReg = MI.getOperand(0).getReg();
6842   Register HiReg = MI.getOperand(1).getReg();
6843   Register SrcReg = MI.getOperand(2).getReg();
6844   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6845   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6846 
6847   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6848                           RI);
6849   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6850   MachineMemOperand *MMOLo =
6851       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6852   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6853       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6854   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6855       .addFrameIndex(FI)
6856       .addImm(0)
6857       .addMemOperand(MMOLo);
6858   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6859       .addFrameIndex(FI)
6860       .addImm(4)
6861       .addMemOperand(MMOHi);
6862   MI.eraseFromParent(); // The pseudo instruction is gone now.
6863   return BB;
6864 }
6865 
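// Lower BuildPairF64Pseudo on RV32D by storing the two GPR halves to a stack
// slot with a pair of SW instructions and reloading the combined value into
// an FPR64 register; this is the inverse of emitSplitF64Pseudo above.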
6866 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6867                                                  MachineBasicBlock *BB) {
6868   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6869          "Unexpected instruction");
6870 
6871   MachineFunction &MF = *BB->getParent();
6872   DebugLoc DL = MI.getDebugLoc();
6873   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6874   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6875   Register DstReg = MI.getOperand(0).getReg();
6876   Register LoReg = MI.getOperand(1).getReg();
6877   Register HiReg = MI.getOperand(2).getReg();
6878   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6879   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6880 
6881   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6882   MachineMemOperand *MMOLo =
6883       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6884   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6885       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6886   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6887       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6888       .addFrameIndex(FI)
6889       .addImm(0)
6890       .addMemOperand(MMOLo);
6891   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6892       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6893       .addFrameIndex(FI)
6894       .addImm(4)
6895       .addMemOperand(MMOHi);
6896   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6897   MI.eraseFromParent(); // The pseudo instruction is gone now.
6898   return BB;
6899 }
6900 
6901 static bool isSelectPseudo(MachineInstr &MI) {
6902   switch (MI.getOpcode()) {
6903   default:
6904     return false;
6905   case RISCV::Select_GPR_Using_CC_GPR:
6906   case RISCV::Select_FPR16_Using_CC_GPR:
6907   case RISCV::Select_FPR32_Using_CC_GPR:
6908   case RISCV::Select_FPR64_Using_CC_GPR:
6909     return true;
6910   }
6911 }
6912 
6913 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6914                                            MachineBasicBlock *BB,
6915                                            const RISCVSubtarget &Subtarget) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the registers to compare, the true/false values to select between,
  // and the condcode to use for the comparison branch.
6920   //
6921   // We produce the following control flow:
6922   //     HeadMBB
6923   //     |  \
6924   //     |  IfFalseMBB
6925   //     | /
6926   //    TailMBB
6927   //
6928   // When we find a sequence of selects we attempt to optimize their emission
6929   // by sharing the control flow. Currently we only handle cases where we have
6930   // multiple selects with the exact same condition (same LHS, RHS and CC).
6931   // The selects may be interleaved with other instructions if the other
6932   // instructions meet some requirements we deem safe:
6933   // - They are debug instructions. Otherwise,
6934   // - They do not have side-effects, do not access memory and their inputs do
6935   //   not depend on the results of the select pseudo-instructions.
6936   // The TrueV/FalseV operands of the selects cannot depend on the result of
6937   // previous selects in the sequence.
6938   // These conditions could be further relaxed. See the X86 target for a
6939   // related approach and more information.
6940   Register LHS = MI.getOperand(1).getReg();
6941   Register RHS = MI.getOperand(2).getReg();
6942   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
6943 
6944   SmallVector<MachineInstr *, 4> SelectDebugValues;
6945   SmallSet<Register, 4> SelectDests;
6946   SelectDests.insert(MI.getOperand(0).getReg());
6947 
6948   MachineInstr *LastSelectPseudo = &MI;
6949 
6950   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6951        SequenceMBBI != E; ++SequenceMBBI) {
6952     if (SequenceMBBI->isDebugInstr())
6953       continue;
6954     else if (isSelectPseudo(*SequenceMBBI)) {
6955       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6956           SequenceMBBI->getOperand(2).getReg() != RHS ||
6957           SequenceMBBI->getOperand(3).getImm() != CC ||
6958           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6959           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6960         break;
6961       LastSelectPseudo = &*SequenceMBBI;
6962       SequenceMBBI->collectDebugValues(SelectDebugValues);
6963       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6964     } else {
6965       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6966           SequenceMBBI->mayLoadOrStore())
6967         break;
6968       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6969             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6970           }))
6971         break;
6972     }
6973   }
6974 
6975   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
6976   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6977   DebugLoc DL = MI.getDebugLoc();
6978   MachineFunction::iterator I = ++BB->getIterator();
6979 
6980   MachineBasicBlock *HeadMBB = BB;
6981   MachineFunction *F = BB->getParent();
6982   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6983   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6984 
6985   F->insert(I, IfFalseMBB);
6986   F->insert(I, TailMBB);
6987 
6988   // Transfer debug instructions associated with the selects to TailMBB.
6989   for (MachineInstr *DebugInstr : SelectDebugValues) {
6990     TailMBB->push_back(DebugInstr->removeFromParent());
6991   }
6992 
6993   // Move all instructions after the sequence to TailMBB.
6994   TailMBB->splice(TailMBB->end(), HeadMBB,
6995                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6996   // Update machine-CFG edges by transferring all successors of the current
6997   // block to the new block which will contain the Phi nodes for the selects.
6998   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6999   // Set the successors for HeadMBB.
7000   HeadMBB->addSuccessor(IfFalseMBB);
7001   HeadMBB->addSuccessor(TailMBB);
7002 
7003   // Insert appropriate branch.
7004   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
7005     .addReg(LHS)
7006     .addReg(RHS)
7007     .addMBB(TailMBB);
7008 
7009   // IfFalseMBB just falls through to TailMBB.
7010   IfFalseMBB->addSuccessor(TailMBB);
7011 
7012   // Create PHIs for all of the select pseudo-instructions.
7013   auto SelectMBBI = MI.getIterator();
7014   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7015   auto InsertionPoint = TailMBB->begin();
7016   while (SelectMBBI != SelectEnd) {
7017     auto Next = std::next(SelectMBBI);
7018     if (isSelectPseudo(*SelectMBBI)) {
7019       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
7020       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
7021               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
7022           .addReg(SelectMBBI->getOperand(4).getReg())
7023           .addMBB(HeadMBB)
7024           .addReg(SelectMBBI->getOperand(5).getReg())
7025           .addMBB(IfFalseMBB);
7026       SelectMBBI->eraseFromParent();
7027     }
7028     SelectMBBI = Next;
7029   }
7030 
7031   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7032   return TailMBB;
7033 }
7034 
7035 MachineBasicBlock *
7036 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
7037                                                  MachineBasicBlock *BB) const {
7038   switch (MI.getOpcode()) {
7039   default:
7040     llvm_unreachable("Unexpected instr type to insert");
7041   case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
7044     return emitReadCycleWidePseudo(MI, BB);
7045   case RISCV::Select_GPR_Using_CC_GPR:
7046   case RISCV::Select_FPR16_Using_CC_GPR:
7047   case RISCV::Select_FPR32_Using_CC_GPR:
7048   case RISCV::Select_FPR64_Using_CC_GPR:
7049     return emitSelectPseudo(MI, BB, Subtarget);
7050   case RISCV::BuildPairF64Pseudo:
7051     return emitBuildPairF64Pseudo(MI, BB);
7052   case RISCV::SplitF64Pseudo:
7053     return emitSplitF64Pseudo(MI, BB);
7054   }
7055 }
7056 
7057 // Calling Convention Implementation.
7058 // The expectations for frontend ABI lowering vary from target to target.
7059 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
7060 // details, but this is a longer term goal. For now, we simply try to keep the
7061 // role of the frontend as simple and well-defined as possible. The rules can
7062 // be summarised as:
7063 // * Never split up large scalar arguments. We handle them here.
7064 // * If a hardfloat calling convention is being used, and the struct may be
7065 // passed in a pair of registers (fp+fp, int+fp), and both registers are
7066 // available, then pass as two separate arguments. If either the GPRs or FPRs
7067 // are exhausted, then pass according to the rule below.
7068 // * If a struct could never be passed in registers or directly in a stack
7069 // slot (as it is larger than 2*XLEN and the floating point rules don't
7070 // apply), then pass it using a pointer with the byval attribute.
// * If a struct is no larger than 2*XLEN bits, then coerce to either a
// two-element word-sized array or a 2*XLEN scalar (depending on alignment).
7073 // * The frontend can determine whether a struct is returned by reference or
7074 // not based on its size and fields. If it will be returned by reference, the
7075 // frontend must modify the prototype so a pointer with the sret annotation is
7076 // passed as the first argument. This is not necessary for large scalar
7077 // returns.
7078 // * Struct return values and varargs should be coerced to structs containing
7079 // register-size fields in the same situations they would be for fixed
7080 // arguments.
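//
// For example, under the ilp32d ABI a struct containing one double and one
// int is passed as two separate arguments (one FPR, one GPR) while both
// register classes have registers available; once either class is exhausted,
// the fallback rules above apply instead.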
7081 
7082 static const MCPhysReg ArgGPRs[] = {
7083   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
7084   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
7085 };
7086 static const MCPhysReg ArgFPR16s[] = {
7087   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
7088   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
7089 };
7090 static const MCPhysReg ArgFPR32s[] = {
7091   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
7092   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
7093 };
7094 static const MCPhysReg ArgFPR64s[] = {
7095   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
7096   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
7097 };
7098 // This is an interim calling convention and it may be changed in the future.
7099 static const MCPhysReg ArgVRs[] = {
7100     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
7101     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
7102     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
7103 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
7104                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
7105                                      RISCV::V20M2, RISCV::V22M2};
7106 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
7107                                      RISCV::V20M4};
7108 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
7109 
7110 // Pass a 2*XLEN argument that has been split into two XLEN values through
7111 // registers or the stack as necessary.
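// For example, an i64 argument on RV32 may be passed in two GPRs, split
// between the last free GPR and the stack, or placed entirely on the stack
// once the argument registers are exhausted.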
7112 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
7113                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
7114                                 MVT ValVT2, MVT LocVT2,
7115                                 ISD::ArgFlagsTy ArgFlags2) {
7116   unsigned XLenInBytes = XLen / 8;
7117   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7118     // At least one half can be passed via register.
7119     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
7120                                      VA1.getLocVT(), CCValAssign::Full));
7121   } else {
7122     // Both halves must be passed on the stack, with proper alignment.
7123     Align StackAlign =
7124         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7125     State.addLoc(
7126         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
7127                             State.AllocateStack(XLenInBytes, StackAlign),
7128                             VA1.getLocVT(), CCValAssign::Full));
7129     State.addLoc(CCValAssign::getMem(
7130         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7131         LocVT2, CCValAssign::Full));
7132     return false;
7133   }
7134 
7135   if (Register Reg = State.AllocateReg(ArgGPRs)) {
7136     // The second half can also be passed via register.
7137     State.addLoc(
7138         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
7139   } else {
7140     // The second half is passed via the stack, without additional alignment.
7141     State.addLoc(CCValAssign::getMem(
7142         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7143         LocVT2, CCValAssign::Full));
7144   }
7145 
7146   return false;
7147 }
7148 
7149 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7150                                Optional<unsigned> FirstMaskArgument,
7151                                CCState &State, const RISCVTargetLowering &TLI) {
7152   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7153   if (RC == &RISCV::VRRegClass) {
7154     // Assign the first mask argument to V0.
7155     // This is an interim calling convention and it may be changed in the
7156     // future.
7157     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7158       return State.AllocateReg(RISCV::V0);
7159     return State.AllocateReg(ArgVRs);
7160   }
7161   if (RC == &RISCV::VRM2RegClass)
7162     return State.AllocateReg(ArgVRM2s);
7163   if (RC == &RISCV::VRM4RegClass)
7164     return State.AllocateReg(ArgVRM4s);
7165   if (RC == &RISCV::VRM8RegClass)
7166     return State.AllocateReg(ArgVRM8s);
7167   llvm_unreachable("Unhandled register class for ValueType");
7168 }
7169 
7170 // Implements the RISC-V calling convention. Returns true upon failure.
7171 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7172                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7173                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7174                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7175                      Optional<unsigned> FirstMaskArgument) {
7176   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7177   assert(XLen == 32 || XLen == 64);
7178   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7179 
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
7182   if (!LocVT.isVector() && IsRet && ValNo > 1)
7183     return true;
7184 
7185   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
7186   // variadic argument, or if no F16/F32 argument registers are available.
7187   bool UseGPRForF16_F32 = true;
7188   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
7189   // variadic argument, or if no F64 argument registers are available.
7190   bool UseGPRForF64 = true;
7191 
7192   switch (ABI) {
7193   default:
7194     llvm_unreachable("Unexpected ABI");
7195   case RISCVABI::ABI_ILP32:
7196   case RISCVABI::ABI_LP64:
7197     break;
7198   case RISCVABI::ABI_ILP32F:
7199   case RISCVABI::ABI_LP64F:
7200     UseGPRForF16_F32 = !IsFixed;
7201     break;
7202   case RISCVABI::ABI_ILP32D:
7203   case RISCVABI::ABI_LP64D:
7204     UseGPRForF16_F32 = !IsFixed;
7205     UseGPRForF64 = !IsFixed;
7206     break;
7207   }
7208 
7209   // FPR16, FPR32, and FPR64 alias each other.
7210   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7211     UseGPRForF16_F32 = true;
7212     UseGPRForF64 = true;
7213   }
7214 
7215   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7216   // similar local variables rather than directly checking against the target
7217   // ABI.
7218 
7219   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7220     LocVT = XLenVT;
7221     LocInfo = CCValAssign::BCvt;
7222   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7223     LocVT = MVT::i64;
7224     LocInfo = CCValAssign::BCvt;
7225   }
7226 
7227   // If this is a variadic argument, the RISC-V calling convention requires
7228   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7229   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7230   // be used regardless of whether the original argument was split during
7231   // legalisation or not. The argument will not be passed by registers if the
7232   // original type is larger than 2*XLEN, so the register alignment rule does
7233   // not apply.
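  // For example, a variadic double on RV32 goes in an aligned register pair
  // such as (a2, a3) or (a4, a5); if the next free register is odd-numbered,
  // it is skipped so the value starts at an even register.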
7234   unsigned TwoXLenInBytes = (2 * XLen) / 8;
7235   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7236       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7237     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7238     // Skip 'odd' register if necessary.
7239     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7240       State.AllocateReg(ArgGPRs);
7241   }
7242 
7243   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7244   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7245       State.getPendingArgFlags();
7246 
7247   assert(PendingLocs.size() == PendingArgFlags.size() &&
7248          "PendingLocs and PendingArgFlags out of sync");
7249 
7250   // Handle passing f64 on RV32D with a soft float ABI or when floating point
7251   // registers are exhausted.
7252   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7253     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7254            "Can't lower f64 if it is split");
7255     // Depending on available argument GPRS, f64 may be passed in a pair of
7256     // GPRs, split between a GPR and the stack, or passed completely on the
7257     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7258     // cases.
7259     Register Reg = State.AllocateReg(ArgGPRs);
7260     LocVT = MVT::i32;
7261     if (!Reg) {
7262       unsigned StackOffset = State.AllocateStack(8, Align(8));
7263       State.addLoc(
7264           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7265       return false;
7266     }
7267     if (!State.AllocateReg(ArgGPRs))
7268       State.AllocateStack(4, Align(4));
7269     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7270     return false;
7271   }
7272 
7273   // Fixed-length vectors are located in the corresponding scalable-vector
7274   // container types.
7275   if (ValVT.isFixedLengthVector())
7276     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7277 
7278   // Split arguments might be passed indirectly, so keep track of the pending
7279   // values. Split vectors are passed via a mix of registers and indirectly, so
7280   // treat them as we would any other argument.
7281   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7282     LocVT = XLenVT;
7283     LocInfo = CCValAssign::Indirect;
7284     PendingLocs.push_back(
7285         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7286     PendingArgFlags.push_back(ArgFlags);
7287     if (!ArgFlags.isSplitEnd()) {
7288       return false;
7289     }
7290   }
7291 
7292   // If the split argument only had two elements, it should be passed directly
7293   // in registers or on the stack.
7294   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
7295       PendingLocs.size() <= 2) {
7296     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7297     // Apply the normal calling convention rules to the first half of the
7298     // split argument.
7299     CCValAssign VA = PendingLocs[0];
7300     ISD::ArgFlagsTy AF = PendingArgFlags[0];
7301     PendingLocs.clear();
7302     PendingArgFlags.clear();
7303     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7304                                ArgFlags);
7305   }
7306 
7307   // Allocate to a register if possible, or else a stack slot.
7308   Register Reg;
7309   unsigned StoreSizeBytes = XLen / 8;
7310   Align StackAlign = Align(XLen / 8);
7311 
7312   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
7313     Reg = State.AllocateReg(ArgFPR16s);
7314   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
7315     Reg = State.AllocateReg(ArgFPR32s);
7316   else if (ValVT == MVT::f64 && !UseGPRForF64)
7317     Reg = State.AllocateReg(ArgFPR64s);
7318   else if (ValVT.isVector()) {
7319     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
7320     if (!Reg) {
7321       // For return values, the vector must be passed fully via registers or
7322       // via the stack.
7323       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
7324       // but we're using all of them.
7325       if (IsRet)
7326         return true;
7327       // Try using a GPR to pass the address
7328       if ((Reg = State.AllocateReg(ArgGPRs))) {
7329         LocVT = XLenVT;
7330         LocInfo = CCValAssign::Indirect;
7331       } else if (ValVT.isScalableVector()) {
7332         report_fatal_error("Unable to pass scalable vector types on the stack");
7333       } else {
7334         // Pass fixed-length vectors on the stack.
7335         LocVT = ValVT;
7336         StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful with vXi1
        // vectors, whose sub-byte element size rounds down to zero bytes
        // (valueOrOne() then yields a one-byte alignment).
7339         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7340       }
7341     }
7342   } else {
7343     Reg = State.AllocateReg(ArgGPRs);
7344   }
7345 
7346   unsigned StackOffset =
7347       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7348 
7349   // If we reach this point and PendingLocs is non-empty, we must be at the
7350   // end of a split argument that must be passed indirectly.
7351   if (!PendingLocs.empty()) {
7352     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7353     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7354 
7355     for (auto &It : PendingLocs) {
7356       if (Reg)
7357         It.convertToReg(Reg);
7358       else
7359         It.convertToMem(StackOffset);
7360       State.addLoc(It);
7361     }
7362     PendingLocs.clear();
7363     PendingArgFlags.clear();
7364     return false;
7365   }
7366 
7367   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7368           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7369          "Expected an XLenVT or vector types at this stage");
7370 
7371   if (Reg) {
7372     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7373     return false;
7374   }
7375 
7376   // When a floating-point value is passed on the stack, no bit-conversion is
7377   // needed.
7378   if (ValVT.isFloatingPoint()) {
7379     LocVT = ValVT;
7380     LocInfo = CCValAssign::Full;
7381   }
7382   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7383   return false;
7384 }
7385 
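// Return the index of the first vector mask (vXi1) argument, if any. The
// calling convention pre-assigns that argument to V0.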
7386 template <typename ArgTy>
7387 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
7388   for (const auto &ArgIdx : enumerate(Args)) {
7389     MVT ArgVT = ArgIdx.value().VT;
7390     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7391       return ArgIdx.index();
7392   }
7393   return None;
7394 }
7395 
7396 void RISCVTargetLowering::analyzeInputArgs(
7397     MachineFunction &MF, CCState &CCInfo,
7398     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7399     RISCVCCAssignFn Fn) const {
7400   unsigned NumArgs = Ins.size();
7401   FunctionType *FType = MF.getFunction().getFunctionType();
7402 
7403   Optional<unsigned> FirstMaskArgument;
7404   if (Subtarget.hasStdExtV())
7405     FirstMaskArgument = preAssignMask(Ins);
7406 
7407   for (unsigned i = 0; i != NumArgs; ++i) {
7408     MVT ArgVT = Ins[i].VT;
7409     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7410 
7411     Type *ArgTy = nullptr;
7412     if (IsRet)
7413       ArgTy = FType->getReturnType();
7414     else if (Ins[i].isOrigArg())
7415       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7416 
7417     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7418     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7419            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7420            FirstMaskArgument)) {
7421       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7422                         << EVT(ArgVT).getEVTString() << '\n');
7423       llvm_unreachable(nullptr);
7424     }
7425   }
7426 }
7427 
7428 void RISCVTargetLowering::analyzeOutputArgs(
7429     MachineFunction &MF, CCState &CCInfo,
7430     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7431     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7432   unsigned NumArgs = Outs.size();
7433 
7434   Optional<unsigned> FirstMaskArgument;
7435   if (Subtarget.hasStdExtV())
7436     FirstMaskArgument = preAssignMask(Outs);
7437 
7438   for (unsigned i = 0; i != NumArgs; i++) {
7439     MVT ArgVT = Outs[i].VT;
7440     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7441     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7442 
7443     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7444     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7445            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7446            FirstMaskArgument)) {
7447       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7448                         << EVT(ArgVT).getEVTString() << "\n");
7449       llvm_unreachable(nullptr);
7450     }
7451   }
7452 }
7453 
7454 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7455 // values.
7456 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7457                                    const CCValAssign &VA, const SDLoc &DL,
7458                                    const RISCVSubtarget &Subtarget) {
7459   switch (VA.getLocInfo()) {
7460   default:
7461     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7462   case CCValAssign::Full:
7463     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7464       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7465     break;
7466   case CCValAssign::BCvt:
7467     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7468       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7469     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7470       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7471     else
7472       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7473     break;
7474   }
7475   return Val;
7476 }
7477 
7478 // The caller is responsible for loading the full value if the argument is
7479 // passed with CCValAssign::Indirect.
7480 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7481                                 const CCValAssign &VA, const SDLoc &DL,
7482                                 const RISCVTargetLowering &TLI) {
7483   MachineFunction &MF = DAG.getMachineFunction();
7484   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7485   EVT LocVT = VA.getLocVT();
7486   SDValue Val;
7487   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7488   Register VReg = RegInfo.createVirtualRegister(RC);
7489   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7490   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7491 
7492   if (VA.getLocInfo() == CCValAssign::Indirect)
7493     return Val;
7494 
7495   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7496 }
7497 
7498 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7499                                    const CCValAssign &VA, const SDLoc &DL,
7500                                    const RISCVSubtarget &Subtarget) {
7501   EVT LocVT = VA.getLocVT();
7502 
7503   switch (VA.getLocInfo()) {
7504   default:
7505     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7506   case CCValAssign::Full:
7507     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7508       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7509     break;
7510   case CCValAssign::BCvt:
7511     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7512       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7513     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7514       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7515     else
7516       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7517     break;
7518   }
7519   return Val;
7520 }
7521 
7522 // The caller is responsible for loading the full value if the argument is
7523 // passed with CCValAssign::Indirect.
7524 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7525                                 const CCValAssign &VA, const SDLoc &DL) {
7526   MachineFunction &MF = DAG.getMachineFunction();
7527   MachineFrameInfo &MFI = MF.getFrameInfo();
7528   EVT LocVT = VA.getLocVT();
7529   EVT ValVT = VA.getValVT();
7530   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7531   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7532                                  /*Immutable=*/true);
7533   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7534   SDValue Val;
7535 
7536   ISD::LoadExtType ExtType;
7537   switch (VA.getLocInfo()) {
7538   default:
7539     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7540   case CCValAssign::Full:
7541   case CCValAssign::Indirect:
7542   case CCValAssign::BCvt:
7543     ExtType = ISD::NON_EXTLOAD;
7544     break;
7545   }
7546   Val = DAG.getExtLoad(
7547       ExtType, DL, LocVT, Chain, FIN,
7548       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7549   return Val;
7550 }
7551 
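// Unpack an f64 that the RV32 soft-double ABIs pass split across two i32
// locations: both halves in a GPR pair, the low half in a7 (X17) with the
// high half on the stack, or the whole value on the stack. As a rough,
// illustrative example (the exact assignment depends on the preceding
// arguments), a call such as
//   double f(int a0, int a1, int a2, int a3, int a4, int a5, int a6,
//            double d);
// can place the low 32 bits of d in a7 and the high 32 bits at the bottom of
// the caller's outgoing argument area, which is the RISCV::X17 case below.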
7552 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7553                                        const CCValAssign &VA, const SDLoc &DL) {
7554   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7555          "Unexpected VA");
7556   MachineFunction &MF = DAG.getMachineFunction();
7557   MachineFrameInfo &MFI = MF.getFrameInfo();
7558   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7559 
7560   if (VA.isMemLoc()) {
7561     // f64 is passed on the stack.
7562     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7563     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7564     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7565                        MachinePointerInfo::getFixedStack(MF, FI));
7566   }
7567 
7568   assert(VA.isRegLoc() && "Expected register VA assignment");
7569 
7570   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7571   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7572   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7573   SDValue Hi;
7574   if (VA.getLocReg() == RISCV::X17) {
7575     // Second half of f64 is passed on the stack.
7576     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7577     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7578     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7579                      MachinePointerInfo::getFixedStack(MF, FI));
7580   } else {
7581     // Second half of f64 is passed in another GPR.
7582     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7583     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7584     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7585   }
7586   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7587 }
7588 
// FastCC shows less than a 1% performance improvement on some particular
// benchmarks, but in theory it may benefit other cases.
7591 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7592                             unsigned ValNo, MVT ValVT, MVT LocVT,
7593                             CCValAssign::LocInfo LocInfo,
7594                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7595                             bool IsFixed, bool IsRet, Type *OrigTy,
7596                             const RISCVTargetLowering &TLI,
7597                             Optional<unsigned> FirstMaskArgument) {
7598 
  // X5 and X6 might be used for the save-restore libcalls, so they are
  // deliberately omitted from this list.
7600   static const MCPhysReg GPRList[] = {
7601       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7602       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7603       RISCV::X29, RISCV::X30, RISCV::X31};
7604 
7605   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7606     if (unsigned Reg = State.AllocateReg(GPRList)) {
7607       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7608       return false;
7609     }
7610   }
7611 
7612   if (LocVT == MVT::f16) {
7613     static const MCPhysReg FPR16List[] = {
7614         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7615         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7616         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7617         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7618     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7619       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7620       return false;
7621     }
7622   }
7623 
7624   if (LocVT == MVT::f32) {
7625     static const MCPhysReg FPR32List[] = {
7626         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7627         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7628         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7629         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7630     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7631       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7632       return false;
7633     }
7634   }
7635 
7636   if (LocVT == MVT::f64) {
7637     static const MCPhysReg FPR64List[] = {
7638         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7639         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7640         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7641         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7642     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7643       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7644       return false;
7645     }
7646   }
7647 
7648   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7649     unsigned Offset4 = State.AllocateStack(4, Align(4));
7650     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7651     return false;
7652   }
7653 
7654   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7655     unsigned Offset5 = State.AllocateStack(8, Align(8));
7656     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7657     return false;
7658   }
7659 
7660   if (LocVT.isVector()) {
7661     if (unsigned Reg =
7662             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
      // Fixed-length vectors are assigned the corresponding scalable-vector
      // container type as their location type.
7665       if (ValVT.isFixedLengthVector())
7666         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7667       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7668     } else {
      // Try to pass the address via a "fast" GPR.
7670       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7671         LocInfo = CCValAssign::Indirect;
7672         LocVT = TLI.getSubtarget().getXLenVT();
7673         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7674       } else if (ValVT.isFixedLengthVector()) {
7675         auto StackAlign =
7676             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7677         unsigned StackOffset =
7678             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7679         State.addLoc(
7680             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7681       } else {
7682         // Can't pass scalable vectors on the stack.
7683         return true;
7684       }
7685     }
7686 
7687     return false;
7688   }
7689 
7690   return true; // CC didn't match.
7691 }
7692 
7693 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7694                          CCValAssign::LocInfo LocInfo,
7695                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7696 
7697   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7698     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7699     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7700     static const MCPhysReg GPRList[] = {
7701         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7702         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7703     if (unsigned Reg = State.AllocateReg(GPRList)) {
7704       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7705       return false;
7706     }
7707   }
7708 
7709   if (LocVT == MVT::f32) {
7710     // Pass in STG registers: F1, ..., F6
7711     //                        fs0 ... fs5
7712     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7713                                           RISCV::F18_F, RISCV::F19_F,
7714                                           RISCV::F20_F, RISCV::F21_F};
7715     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7716       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7717       return false;
7718     }
7719   }
7720 
7721   if (LocVT == MVT::f64) {
7722     // Pass in STG registers: D1, ..., D6
7723     //                        fs6 ... fs11
7724     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7725                                           RISCV::F24_D, RISCV::F25_D,
7726                                           RISCV::F26_D, RISCV::F27_D};
7727     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7728       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7729       return false;
7730     }
7731   }
7732 
7733   report_fatal_error("No registers left in GHC calling convention");
7734   return true;
7735 }
7736 
7737 // Transform physical registers into virtual registers.
7738 SDValue RISCVTargetLowering::LowerFormalArguments(
7739     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7740     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7741     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7742 
7743   MachineFunction &MF = DAG.getMachineFunction();
7744 
7745   switch (CallConv) {
7746   default:
7747     report_fatal_error("Unsupported calling convention");
7748   case CallingConv::C:
7749   case CallingConv::Fast:
7750     break;
7751   case CallingConv::GHC:
7752     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7753         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7754       report_fatal_error(
7755         "GHC calling convention requires the F and D instruction set extensions");
7756   }
7757 
7758   const Function &Func = MF.getFunction();
7759   if (Func.hasFnAttribute("interrupt")) {
7760     if (!Func.arg_empty())
7761       report_fatal_error(
7762         "Functions with the interrupt attribute cannot have arguments!");
7763 
7764     StringRef Kind =
7765       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7766 
7767     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7768       report_fatal_error(
7769         "Function interrupt attribute argument not supported!");
7770   }
7771 
7772   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7773   MVT XLenVT = Subtarget.getXLenVT();
7774   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7776   std::vector<SDValue> OutChains;
7777 
7778   // Assign locations to all of the incoming arguments.
7779   SmallVector<CCValAssign, 16> ArgLocs;
7780   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7781 
7782   if (CallConv == CallingConv::GHC)
7783     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7784   else
7785     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7786                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7787                                                    : CC_RISCV);
7788 
7789   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7790     CCValAssign &VA = ArgLocs[i];
7791     SDValue ArgValue;
7792     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7793     // case.
7794     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7795       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7796     else if (VA.isRegLoc())
7797       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7798     else
7799       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7800 
7801     if (VA.getLocInfo() == CCValAssign::Indirect) {
7802       // If the original argument was split and passed by reference (e.g. i128
7803       // on RV32), we need to load all parts of it here (using the same
7804       // address). Vectors may be partly split to registers and partly to the
7805       // stack, in which case the base address is partly offset and subsequent
7806       // stores are relative to that.
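      // As an illustrative example (ILP32, standard C calling convention): an
      // i128 argument on RV32 is passed indirectly and legalized into four
      // i32 parts sharing one OrigArgIndex; the load just below fetches the
      // first part, and the loop after it fetches the remaining parts at
      // increasing PartOffsets from the same base address.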
7807       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7808                                    MachinePointerInfo()));
7809       unsigned ArgIndex = Ins[i].OrigArgIndex;
7810       unsigned ArgPartOffset = Ins[i].PartOffset;
7811       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7812       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7813         CCValAssign &PartVA = ArgLocs[i + 1];
7814         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7815         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7816         if (PartVA.getValVT().isScalableVector())
7817           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7818         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7819         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7820                                      MachinePointerInfo()));
7821         ++i;
7822       }
7823       continue;
7824     }
7825     InVals.push_back(ArgValue);
7826   }
7827 
7828   if (IsVarArg) {
7829     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7830     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7831     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7832     MachineFrameInfo &MFI = MF.getFrameInfo();
7833     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7834     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7835 
7836     // Offset of the first variable argument from stack pointer, and size of
7837     // the vararg save area. For now, the varargs save area is either zero or
7838     // large enough to hold a0-a7.
7839     int VaArgOffset, VarArgsSaveSize;
7840 
7841     // If all registers are allocated, then all varargs must be passed on the
7842     // stack and we don't need to save any argregs.
7843     if (ArgRegs.size() == Idx) {
7844       VaArgOffset = CCInfo.getNextStackOffset();
7845       VarArgsSaveSize = 0;
7846     } else {
7847       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7848       VaArgOffset = -VarArgsSaveSize;
7849     }
7850 
    // Record the frame index of the first variable argument,
    // which is needed for lowering VASTART.
7853     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7854     RVFI->setVarArgsFrameIndex(FI);
7855 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
7859     if (Idx % 2) {
7860       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7861       VarArgsSaveSize += XLenInBytes;
7862     }
7863 
7864     // Copy the integer registers that may have been used for passing varargs
7865     // to the vararg save area.
7866     for (unsigned I = Idx; I < ArgRegs.size();
7867          ++I, VaArgOffset += XLenInBytes) {
7868       const Register Reg = RegInfo.createVirtualRegister(RC);
7869       RegInfo.addLiveIn(ArgRegs[I], Reg);
7870       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7871       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7872       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7873       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7874                                    MachinePointerInfo::getFixedStack(MF, FI));
7875       cast<StoreSDNode>(Store.getNode())
7876           ->getMemOperand()
7877           ->setValue((Value *)nullptr);
7878       OutChains.push_back(Store);
7879     }
7880     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7881   }
7882 
  // All stores are grouped into one TokenFactor node to allow the sizes of
  // Ins and InVals to match. This only happens for vararg functions.
7885   if (!OutChains.empty()) {
7886     OutChains.push_back(Chain);
7887     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7888   }
7889 
7890   return Chain;
7891 }
7892 
7893 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7894 /// for tail call optimization.
7895 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7896 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7897     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7898     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7899 
7900   auto &Callee = CLI.Callee;
7901   auto CalleeCC = CLI.CallConv;
7902   auto &Outs = CLI.Outs;
7903   auto &Caller = MF.getFunction();
7904   auto CallerCC = Caller.getCallingConv();
7905 
7906   // Exception-handling functions need a special set of instructions to
7907   // indicate a return to the hardware. Tail-calling another function would
7908   // probably break this.
7909   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7910   // should be expanded as new function attributes are introduced.
7911   if (Caller.hasFnAttribute("interrupt"))
7912     return false;
7913 
7914   // Do not tail call opt if the stack is used to pass parameters.
7915   if (CCInfo.getNextStackOffset() != 0)
7916     return false;
7917 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register or,
  // if no register is available, on the stack. Passing indirectly usually
  // requires allocating stack space to store the value, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we must
  // also check whether any CCValAssign in ArgLocs is CCValAssign::Indirect.
7926   for (auto &VA : ArgLocs)
7927     if (VA.getLocInfo() == CCValAssign::Indirect)
7928       return false;
7929 
7930   // Do not tail call opt if either caller or callee uses struct return
7931   // semantics.
7932   auto IsCallerStructRet = Caller.hasStructRetAttr();
7933   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7934   if (IsCallerStructRet || IsCalleeStructRet)
7935     return false;
7936 
7937   // Externally-defined functions with weak linkage should not be
7938   // tail-called. The behaviour of branch instructions in this situation (as
7939   // used for tail calls) is implementation-defined, so we cannot rely on the
7940   // linker replacing the tail call with a return.
7941   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7942     const GlobalValue *GV = G->getGlobal();
7943     if (GV->hasExternalWeakLinkage())
7944       return false;
7945   }
7946 
7947   // The callee has to preserve all registers the caller needs to preserve.
7948   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7949   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7950   if (CalleeCC != CallerCC) {
7951     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7952     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7953       return false;
7954   }
7955 
7956   // Byval parameters hand the function a pointer directly into the stack area
7957   // we want to reuse during a tail call. Working around this *is* possible
7958   // but less efficient and uglier in LowerCall.
7959   for (auto &Arg : Outs)
7960     if (Arg.Flags.isByVal())
7961       return false;
7962 
7963   return true;
7964 }
7965 
7966 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7967   return DAG.getDataLayout().getPrefTypeAlign(
7968       VT.getTypeForEVT(*DAG.getContext()));
7969 }
7970 
7971 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7972 // and output parameter nodes.
7973 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7974                                        SmallVectorImpl<SDValue> &InVals) const {
7975   SelectionDAG &DAG = CLI.DAG;
7976   SDLoc &DL = CLI.DL;
7977   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7978   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7979   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7980   SDValue Chain = CLI.Chain;
7981   SDValue Callee = CLI.Callee;
7982   bool &IsTailCall = CLI.IsTailCall;
7983   CallingConv::ID CallConv = CLI.CallConv;
7984   bool IsVarArg = CLI.IsVarArg;
7985   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7986   MVT XLenVT = Subtarget.getXLenVT();
7987 
7988   MachineFunction &MF = DAG.getMachineFunction();
7989 
7990   // Analyze the operands of the call, assigning locations to each operand.
7991   SmallVector<CCValAssign, 16> ArgLocs;
7992   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7993 
7994   if (CallConv == CallingConv::GHC)
7995     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7996   else
7997     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7998                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7999                                                     : CC_RISCV);
8000 
8001   // Check if it's really possible to do a tail call.
8002   if (IsTailCall)
8003     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8004 
8005   if (IsTailCall)
8006     ++NumTailCalls;
8007   else if (CLI.CB && CLI.CB->isMustTailCall())
8008     report_fatal_error("failed to perform tail call elimination on a call "
8009                        "site marked musttail");
8010 
8011   // Get a count of how many bytes are to be pushed on the stack.
8012   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
8013 
8014   // Create local copies for byval args
8015   SmallVector<SDValue, 8> ByValArgs;
8016   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8017     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8018     if (!Flags.isByVal())
8019       continue;
8020 
8021     SDValue Arg = OutVals[i];
8022     unsigned Size = Flags.getByValSize();
8023     Align Alignment = Flags.getNonZeroByValAlign();
8024 
8025     int FI =
8026         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
8027     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8028     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
8029 
8030     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
8031                           /*IsVolatile=*/false,
8032                           /*AlwaysInline=*/false, IsTailCall,
8033                           MachinePointerInfo(), MachinePointerInfo());
8034     ByValArgs.push_back(FIPtr);
8035   }
8036 
8037   if (!IsTailCall)
8038     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
8039 
8040   // Copy argument values to their designated locations.
8041   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
8042   SmallVector<SDValue, 8> MemOpChains;
8043   SDValue StackPtr;
8044   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
8045     CCValAssign &VA = ArgLocs[i];
8046     SDValue ArgValue = OutVals[i];
8047     ISD::ArgFlagsTy Flags = Outs[i].Flags;
8048 
8049     // Handle passing f64 on RV32D with a soft float ABI as a special case.
8050     bool IsF64OnRV32DSoftABI =
8051         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
8052     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
8053       SDValue SplitF64 = DAG.getNode(
8054           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8055       SDValue Lo = SplitF64.getValue(0);
8056       SDValue Hi = SplitF64.getValue(1);
8057 
8058       Register RegLo = VA.getLocReg();
8059       RegsToPass.push_back(std::make_pair(RegLo, Lo));
8060 
8061       if (RegLo == RISCV::X17) {
8062         // Second half of f64 is passed on the stack.
8063         // Work out the address of the stack slot.
8064         if (!StackPtr.getNode())
8065           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8066         // Emit the store.
8067         MemOpChains.push_back(
8068             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
8069       } else {
8070         // Second half of f64 is passed in another GPR.
8071         assert(RegLo < RISCV::X31 && "Invalid register pair");
8072         Register RegHigh = RegLo + 1;
8073         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8074       }
8075       continue;
8076     }
8077 
8078     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
8079     // as any other MemLoc.
8080 
8081     // Promote the value if needed.
8082     // For now, only handle fully promoted and indirect arguments.
8083     if (VA.getLocInfo() == CCValAssign::Indirect) {
8084       // Store the argument in a stack slot and pass its address.
8085       Align StackAlign =
8086           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
8087                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
8088       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
8089       // If the original argument was split (e.g. i128), we need
8090       // to store the required parts of it here (and pass just one address).
8091       // Vectors may be partly split to registers and partly to the stack, in
8092       // which case the base address is partly offset and subsequent stores are
8093       // relative to that.
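      // As an illustrative example mirroring the receiving side in
      // LowerFormalArguments: an i128 argument on RV32 reaches this point as
      // four i32 parts with the same OrigArgIndex; all of them are stored to
      // the single stack temporary created below, and only SpillSlot's
      // address is passed to the callee.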
8094       unsigned ArgIndex = Outs[i].OrigArgIndex;
8095       unsigned ArgPartOffset = Outs[i].PartOffset;
8096       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We don't know what we're actually
      // storing up front, so walk the remaining parts in the loop below and
      // collect that info.
8100       SmallVector<std::pair<SDValue, SDValue>> Parts;
8101       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
8102         SDValue PartValue = OutVals[i + 1];
8103         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
8104         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8105         EVT PartVT = PartValue.getValueType();
8106         if (PartVT.isScalableVector())
8107           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8108         StoredSize += PartVT.getStoreSize();
8109         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
8110         Parts.push_back(std::make_pair(PartValue, Offset));
8111         ++i;
8112       }
8113       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
8114       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
8115       MemOpChains.push_back(
8116           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
8117                        MachinePointerInfo::getFixedStack(MF, FI)));
8118       for (const auto &Part : Parts) {
8119         SDValue PartValue = Part.first;
8120         SDValue PartOffset = Part.second;
8121         SDValue Address =
8122             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
8123         MemOpChains.push_back(
8124             DAG.getStore(Chain, DL, PartValue, Address,
8125                          MachinePointerInfo::getFixedStack(MF, FI)));
8126       }
8127       ArgValue = SpillSlot;
8128     } else {
8129       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
8130     }
8131 
8132     // Use local copy if it is a byval arg.
8133     if (Flags.isByVal())
8134       ArgValue = ByValArgs[j++];
8135 
8136     if (VA.isRegLoc()) {
8137       // Queue up the argument copies and emit them at the end.
8138       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
8139     } else {
8140       assert(VA.isMemLoc() && "Argument not register or memory");
8141       assert(!IsTailCall && "Tail call not allowed if stack is used "
8142                             "for passing parameters");
8143 
8144       // Work out the address of the stack slot.
8145       if (!StackPtr.getNode())
8146         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8147       SDValue Address =
8148           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8149                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8150 
8151       // Emit the store.
8152       MemOpChains.push_back(
8153           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8154     }
8155   }
8156 
8157   // Join the stores, which are independent of one another.
8158   if (!MemOpChains.empty())
8159     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8160 
8161   SDValue Glue;
8162 
8163   // Build a sequence of copy-to-reg nodes, chained and glued together.
8164   for (auto &Reg : RegsToPass) {
8165     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8166     Glue = Chain.getValue(1);
8167   }
8168 
  // Validate that none of the argument registers have been marked as
  // reserved; if so, report an error. Do the same for the return address if
  // this is not a tail call.
8172   validateCCReservedRegs(RegsToPass, MF);
8173   if (!IsTailCall &&
8174       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8175     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8176         MF.getFunction(),
8177         "Return address register required, but has been reserved."});
8178 
8179   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
8180   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
8181   // split it and then direct call can be matched by PseudoCALL.
8182   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8183     const GlobalValue *GV = S->getGlobal();
8184 
8185     unsigned OpFlags = RISCVII::MO_CALL;
8186     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8187       OpFlags = RISCVII::MO_PLT;
8188 
8189     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8190   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8191     unsigned OpFlags = RISCVII::MO_CALL;
8192 
8193     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8194                                                  nullptr))
8195       OpFlags = RISCVII::MO_PLT;
8196 
8197     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8198   }
8199 
8200   // The first call operand is the chain and the second is the target address.
8201   SmallVector<SDValue, 8> Ops;
8202   Ops.push_back(Chain);
8203   Ops.push_back(Callee);
8204 
8205   // Add argument registers to the end of the list so that they are
8206   // known live into the call.
8207   for (auto &Reg : RegsToPass)
8208     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8209 
8210   if (!IsTailCall) {
8211     // Add a register mask operand representing the call-preserved registers.
8212     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8213     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8214     assert(Mask && "Missing call preserved mask for calling convention");
8215     Ops.push_back(DAG.getRegisterMask(Mask));
8216   }
8217 
8218   // Glue the call to the argument copies, if any.
8219   if (Glue.getNode())
8220     Ops.push_back(Glue);
8221 
8222   // Emit the call.
8223   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8224 
8225   if (IsTailCall) {
8226     MF.getFrameInfo().setHasTailCall();
8227     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8228   }
8229 
8230   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8231   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8232   Glue = Chain.getValue(1);
8233 
8234   // Mark the end of the call, which is glued to the call itself.
8235   Chain = DAG.getCALLSEQ_END(Chain,
8236                              DAG.getConstant(NumBytes, DL, PtrVT, true),
8237                              DAG.getConstant(0, DL, PtrVT, true),
8238                              Glue, DL);
8239   Glue = Chain.getValue(1);
8240 
8241   // Assign locations to each value returned by this call.
8242   SmallVector<CCValAssign, 16> RVLocs;
8243   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8244   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8245 
8246   // Copy all of the result registers out of their specified physreg.
8247   for (auto &VA : RVLocs) {
8248     // Copy the value out
8249     SDValue RetValue =
8250         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
8251     // Glue the RetValue to the end of the call sequence
8252     Chain = RetValue.getValue(1);
8253     Glue = RetValue.getValue(2);
8254 
8255     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8256       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8257       SDValue RetValue2 =
8258           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8259       Chain = RetValue2.getValue(1);
8260       Glue = RetValue2.getValue(2);
8261       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8262                              RetValue2);
8263     }
8264 
8265     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8266 
8267     InVals.push_back(RetValue);
8268   }
8269 
8270   return Chain;
8271 }
8272 
8273 bool RISCVTargetLowering::CanLowerReturn(
8274     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8275     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8276   SmallVector<CCValAssign, 16> RVLocs;
8277   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8278 
8279   Optional<unsigned> FirstMaskArgument;
8280   if (Subtarget.hasStdExtV())
8281     FirstMaskArgument = preAssignMask(Outs);
8282 
8283   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8284     MVT VT = Outs[i].VT;
8285     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8286     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8287     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8288                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8289                  *this, FirstMaskArgument))
8290       return false;
8291   }
8292   return true;
8293 }
8294 
8295 SDValue
8296 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8297                                  bool IsVarArg,
8298                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
8299                                  const SmallVectorImpl<SDValue> &OutVals,
8300                                  const SDLoc &DL, SelectionDAG &DAG) const {
8301   const MachineFunction &MF = DAG.getMachineFunction();
8302   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8303 
8304   // Stores the assignment of the return value to a location.
8305   SmallVector<CCValAssign, 16> RVLocs;
8306 
8307   // Info about the registers and stack slot.
8308   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
8309                  *DAG.getContext());
8310 
8311   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
8312                     nullptr, CC_RISCV);
8313 
8314   if (CallConv == CallingConv::GHC && !RVLocs.empty())
8315     report_fatal_error("GHC functions return void only");
8316 
8317   SDValue Glue;
8318   SmallVector<SDValue, 4> RetOps(1, Chain);
8319 
8320   // Copy the result values into the output registers.
8321   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
8322     SDValue Val = OutVals[i];
8323     CCValAssign &VA = RVLocs[i];
8324     assert(VA.isRegLoc() && "Can only return in registers!");
8325 
8326     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8327       // Handle returning f64 on RV32D with a soft float ABI.
8328       assert(VA.isRegLoc() && "Expected return via registers");
8329       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
8330                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
8331       SDValue Lo = SplitF64.getValue(0);
8332       SDValue Hi = SplitF64.getValue(1);
8333       Register RegLo = VA.getLocReg();
8334       assert(RegLo < RISCV::X31 && "Invalid register pair");
8335       Register RegHi = RegLo + 1;
8336 
8337       if (STI.isRegisterReservedByUser(RegLo) ||
8338           STI.isRegisterReservedByUser(RegHi))
8339         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8340             MF.getFunction(),
8341             "Return value register required, but has been reserved."});
8342 
8343       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8344       Glue = Chain.getValue(1);
8345       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8346       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8347       Glue = Chain.getValue(1);
8348       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8349     } else {
8350       // Handle a 'normal' return.
8351       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8352       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8353 
8354       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8355         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8356             MF.getFunction(),
8357             "Return value register required, but has been reserved."});
8358 
8359       // Guarantee that all emitted copies are stuck together.
8360       Glue = Chain.getValue(1);
8361       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8362     }
8363   }
8364 
8365   RetOps[0] = Chain; // Update chain.
8366 
8367   // Add the glue node if we have it.
8368   if (Glue.getNode()) {
8369     RetOps.push_back(Glue);
8370   }
8371 
8372   unsigned RetOpc = RISCVISD::RET_FLAG;
8373   // Interrupt service routines use different return instructions.
8374   const Function &Func = DAG.getMachineFunction().getFunction();
8375   if (Func.hasFnAttribute("interrupt")) {
8376     if (!Func.getReturnType()->isVoidTy())
8377       report_fatal_error(
8378           "Functions with the interrupt attribute must have void return type!");
8379 
8380     MachineFunction &MF = DAG.getMachineFunction();
8381     StringRef Kind =
8382       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8383 
8384     if (Kind == "user")
8385       RetOpc = RISCVISD::URET_FLAG;
8386     else if (Kind == "supervisor")
8387       RetOpc = RISCVISD::SRET_FLAG;
8388     else
8389       RetOpc = RISCVISD::MRET_FLAG;
8390   }
8391 
8392   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8393 }
8394 
8395 void RISCVTargetLowering::validateCCReservedRegs(
8396     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8397     MachineFunction &MF) const {
8398   const Function &F = MF.getFunction();
8399   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8400 
8401   if (llvm::any_of(Regs, [&STI](auto Reg) {
8402         return STI.isRegisterReservedByUser(Reg.first);
8403       }))
8404     F.getContext().diagnose(DiagnosticInfoUnsupported{
8405         F, "Argument register required, but has been reserved."});
8406 }
8407 
8408 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8409   return CI->isTailCall();
8410 }
8411 
8412 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8413 #define NODE_NAME_CASE(NODE)                                                   \
8414   case RISCVISD::NODE:                                                         \
8415     return "RISCVISD::" #NODE;
8416   // clang-format off
8417   switch ((RISCVISD::NodeType)Opcode) {
8418   case RISCVISD::FIRST_NUMBER:
8419     break;
8420   NODE_NAME_CASE(RET_FLAG)
8421   NODE_NAME_CASE(URET_FLAG)
8422   NODE_NAME_CASE(SRET_FLAG)
8423   NODE_NAME_CASE(MRET_FLAG)
8424   NODE_NAME_CASE(CALL)
8425   NODE_NAME_CASE(SELECT_CC)
8426   NODE_NAME_CASE(BR_CC)
8427   NODE_NAME_CASE(BuildPairF64)
8428   NODE_NAME_CASE(SplitF64)
8429   NODE_NAME_CASE(TAIL)
8430   NODE_NAME_CASE(MULHSU)
8431   NODE_NAME_CASE(SLLW)
8432   NODE_NAME_CASE(SRAW)
8433   NODE_NAME_CASE(SRLW)
8434   NODE_NAME_CASE(DIVW)
8435   NODE_NAME_CASE(DIVUW)
8436   NODE_NAME_CASE(REMUW)
8437   NODE_NAME_CASE(ROLW)
8438   NODE_NAME_CASE(RORW)
8439   NODE_NAME_CASE(CLZW)
8440   NODE_NAME_CASE(CTZW)
8441   NODE_NAME_CASE(FSLW)
8442   NODE_NAME_CASE(FSRW)
8443   NODE_NAME_CASE(FSL)
8444   NODE_NAME_CASE(FSR)
8445   NODE_NAME_CASE(FMV_H_X)
8446   NODE_NAME_CASE(FMV_X_ANYEXTH)
8447   NODE_NAME_CASE(FMV_W_X_RV64)
8448   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8449   NODE_NAME_CASE(FCVT_X_RTZ)
8450   NODE_NAME_CASE(FCVT_XU_RTZ)
8451   NODE_NAME_CASE(FCVT_W_RTZ_RV64)
8452   NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
8453   NODE_NAME_CASE(READ_CYCLE_WIDE)
8454   NODE_NAME_CASE(GREV)
8455   NODE_NAME_CASE(GREVW)
8456   NODE_NAME_CASE(GORC)
8457   NODE_NAME_CASE(GORCW)
8458   NODE_NAME_CASE(SHFL)
8459   NODE_NAME_CASE(SHFLW)
8460   NODE_NAME_CASE(UNSHFL)
8461   NODE_NAME_CASE(UNSHFLW)
8462   NODE_NAME_CASE(BCOMPRESS)
8463   NODE_NAME_CASE(BCOMPRESSW)
8464   NODE_NAME_CASE(BDECOMPRESS)
8465   NODE_NAME_CASE(BDECOMPRESSW)
8466   NODE_NAME_CASE(VMV_V_X_VL)
8467   NODE_NAME_CASE(VFMV_V_F_VL)
8468   NODE_NAME_CASE(VMV_X_S)
8469   NODE_NAME_CASE(VMV_S_X_VL)
8470   NODE_NAME_CASE(VFMV_S_F_VL)
8471   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8472   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8473   NODE_NAME_CASE(READ_VLENB)
8474   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8475   NODE_NAME_CASE(VSLIDEUP_VL)
8476   NODE_NAME_CASE(VSLIDE1UP_VL)
8477   NODE_NAME_CASE(VSLIDEDOWN_VL)
8478   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8479   NODE_NAME_CASE(VID_VL)
8480   NODE_NAME_CASE(VFNCVT_ROD_VL)
8481   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8482   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8483   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8484   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8485   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8486   NODE_NAME_CASE(VECREDUCE_AND_VL)
8487   NODE_NAME_CASE(VECREDUCE_OR_VL)
8488   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8489   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8490   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8491   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8492   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8493   NODE_NAME_CASE(ADD_VL)
8494   NODE_NAME_CASE(AND_VL)
8495   NODE_NAME_CASE(MUL_VL)
8496   NODE_NAME_CASE(OR_VL)
8497   NODE_NAME_CASE(SDIV_VL)
8498   NODE_NAME_CASE(SHL_VL)
8499   NODE_NAME_CASE(SREM_VL)
8500   NODE_NAME_CASE(SRA_VL)
8501   NODE_NAME_CASE(SRL_VL)
8502   NODE_NAME_CASE(SUB_VL)
8503   NODE_NAME_CASE(UDIV_VL)
8504   NODE_NAME_CASE(UREM_VL)
8505   NODE_NAME_CASE(XOR_VL)
8506   NODE_NAME_CASE(SADDSAT_VL)
8507   NODE_NAME_CASE(UADDSAT_VL)
8508   NODE_NAME_CASE(SSUBSAT_VL)
8509   NODE_NAME_CASE(USUBSAT_VL)
8510   NODE_NAME_CASE(FADD_VL)
8511   NODE_NAME_CASE(FSUB_VL)
8512   NODE_NAME_CASE(FMUL_VL)
8513   NODE_NAME_CASE(FDIV_VL)
8514   NODE_NAME_CASE(FNEG_VL)
8515   NODE_NAME_CASE(FABS_VL)
8516   NODE_NAME_CASE(FSQRT_VL)
8517   NODE_NAME_CASE(FMA_VL)
8518   NODE_NAME_CASE(FCOPYSIGN_VL)
8519   NODE_NAME_CASE(SMIN_VL)
8520   NODE_NAME_CASE(SMAX_VL)
8521   NODE_NAME_CASE(UMIN_VL)
8522   NODE_NAME_CASE(UMAX_VL)
8523   NODE_NAME_CASE(FMINNUM_VL)
8524   NODE_NAME_CASE(FMAXNUM_VL)
8525   NODE_NAME_CASE(MULHS_VL)
8526   NODE_NAME_CASE(MULHU_VL)
8527   NODE_NAME_CASE(FP_TO_SINT_VL)
8528   NODE_NAME_CASE(FP_TO_UINT_VL)
8529   NODE_NAME_CASE(SINT_TO_FP_VL)
8530   NODE_NAME_CASE(UINT_TO_FP_VL)
8531   NODE_NAME_CASE(FP_EXTEND_VL)
8532   NODE_NAME_CASE(FP_ROUND_VL)
8533   NODE_NAME_CASE(VWMUL_VL)
8534   NODE_NAME_CASE(VWMULU_VL)
8535   NODE_NAME_CASE(SETCC_VL)
8536   NODE_NAME_CASE(VSELECT_VL)
8537   NODE_NAME_CASE(VMAND_VL)
8538   NODE_NAME_CASE(VMOR_VL)
8539   NODE_NAME_CASE(VMXOR_VL)
8540   NODE_NAME_CASE(VMCLR_VL)
8541   NODE_NAME_CASE(VMSET_VL)
8542   NODE_NAME_CASE(VRGATHER_VX_VL)
8543   NODE_NAME_CASE(VRGATHER_VV_VL)
8544   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8545   NODE_NAME_CASE(VSEXT_VL)
8546   NODE_NAME_CASE(VZEXT_VL)
8547   NODE_NAME_CASE(VPOPC_VL)
8548   NODE_NAME_CASE(VLE_VL)
8549   NODE_NAME_CASE(VSE_VL)
8550   NODE_NAME_CASE(READ_CSR)
8551   NODE_NAME_CASE(WRITE_CSR)
8552   NODE_NAME_CASE(SWAP_CSR)
8553   }
8554   // clang-format on
8555   return nullptr;
8556 #undef NODE_NAME_CASE
8557 }
8558 
8559 /// getConstraintType - Given a constraint letter, return the type of
8560 /// constraint it is for this target.
8561 RISCVTargetLowering::ConstraintType
8562 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8563   if (Constraint.size() == 1) {
8564     switch (Constraint[0]) {
8565     default:
8566       break;
8567     case 'f':
8568       return C_RegisterClass;
8569     case 'I':
8570     case 'J':
8571     case 'K':
8572       return C_Immediate;
8573     case 'A':
8574       return C_Memory;
8575     case 'S': // A symbolic address
8576       return C_Other;
8577     }
8578   } else {
8579     if (Constraint == "vr" || Constraint == "vm")
8580       return C_RegisterClass;
8581   }
8582   return TargetLowering::getConstraintType(Constraint);
8583 }
8584 
8585 std::pair<unsigned, const TargetRegisterClass *>
8586 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8587                                                   StringRef Constraint,
8588                                                   MVT VT) const {
8589   // First, see if this is a constraint that directly corresponds to a
8590   // RISCV register class.
8591   if (Constraint.size() == 1) {
8592     switch (Constraint[0]) {
8593     case 'r':
8594       return std::make_pair(0U, &RISCV::GPRRegClass);
8595     case 'f':
8596       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8597         return std::make_pair(0U, &RISCV::FPR16RegClass);
8598       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8599         return std::make_pair(0U, &RISCV::FPR32RegClass);
8600       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8601         return std::make_pair(0U, &RISCV::FPR64RegClass);
8602       break;
8603     default:
8604       break;
8605     }
8606   } else {
8607     if (Constraint == "vr") {
8608       for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
8609                              &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8610         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8611           return std::make_pair(0U, RC);
8612       }
8613     } else if (Constraint == "vm") {
8614       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8615         return std::make_pair(0U, &RISCV::VMRegClass);
8616     }
8617   }
8618 
8619   // Clang will correctly decode the usage of register name aliases into their
8620   // official names. However, other frontends like `rustc` do not. This allows
8621   // users of these frontends to use the ABI names for registers in LLVM-style
8622   // register constraints.
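  // For example (illustrative LLVM IR, not specific to any frontend):
  //   call void asm sideeffect "", "{a0}"(i32 %x)
  // is accepted here even though the backing TableGen record is named X10.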
8623   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8624                                .Case("{zero}", RISCV::X0)
8625                                .Case("{ra}", RISCV::X1)
8626                                .Case("{sp}", RISCV::X2)
8627                                .Case("{gp}", RISCV::X3)
8628                                .Case("{tp}", RISCV::X4)
8629                                .Case("{t0}", RISCV::X5)
8630                                .Case("{t1}", RISCV::X6)
8631                                .Case("{t2}", RISCV::X7)
8632                                .Cases("{s0}", "{fp}", RISCV::X8)
8633                                .Case("{s1}", RISCV::X9)
8634                                .Case("{a0}", RISCV::X10)
8635                                .Case("{a1}", RISCV::X11)
8636                                .Case("{a2}", RISCV::X12)
8637                                .Case("{a3}", RISCV::X13)
8638                                .Case("{a4}", RISCV::X14)
8639                                .Case("{a5}", RISCV::X15)
8640                                .Case("{a6}", RISCV::X16)
8641                                .Case("{a7}", RISCV::X17)
8642                                .Case("{s2}", RISCV::X18)
8643                                .Case("{s3}", RISCV::X19)
8644                                .Case("{s4}", RISCV::X20)
8645                                .Case("{s5}", RISCV::X21)
8646                                .Case("{s6}", RISCV::X22)
8647                                .Case("{s7}", RISCV::X23)
8648                                .Case("{s8}", RISCV::X24)
8649                                .Case("{s9}", RISCV::X25)
8650                                .Case("{s10}", RISCV::X26)
8651                                .Case("{s11}", RISCV::X27)
8652                                .Case("{t3}", RISCV::X28)
8653                                .Case("{t4}", RISCV::X29)
8654                                .Case("{t5}", RISCV::X30)
8655                                .Case("{t6}", RISCV::X31)
8656                                .Default(RISCV::NoRegister);
8657   if (XRegFromAlias != RISCV::NoRegister)
8658     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8659 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and since we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
8664   //
8665   // The second case is the ABI name of the register, so that frontends can also
8666   // use the ABI names in register constraint lists.
8667   if (Subtarget.hasStdExtF()) {
8668     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8669                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8670                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8671                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8672                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8673                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8674                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8675                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8676                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8677                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8678                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8679                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8680                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8681                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8682                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8683                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8684                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8685                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8686                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8687                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8688                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8689                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8690                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8691                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD()) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      return std::make_pair(FReg, &RISCV::FPR32RegClass);
    }
  }

  if (Subtarget.hasStdExtV()) {
    Register VReg = StringSwitch<Register>(Constraint.lower())
                        .Case("{v0}", RISCV::V0)
                        .Case("{v1}", RISCV::V1)
                        .Case("{v2}", RISCV::V2)
                        .Case("{v3}", RISCV::V3)
                        .Case("{v4}", RISCV::V4)
                        .Case("{v5}", RISCV::V5)
                        .Case("{v6}", RISCV::V6)
                        .Case("{v7}", RISCV::V7)
                        .Case("{v8}", RISCV::V8)
                        .Case("{v9}", RISCV::V9)
                        .Case("{v10}", RISCV::V10)
                        .Case("{v11}", RISCV::V11)
                        .Case("{v12}", RISCV::V12)
                        .Case("{v13}", RISCV::V13)
                        .Case("{v14}", RISCV::V14)
                        .Case("{v15}", RISCV::V15)
                        .Case("{v16}", RISCV::V16)
                        .Case("{v17}", RISCV::V17)
                        .Case("{v18}", RISCV::V18)
                        .Case("{v19}", RISCV::V19)
                        .Case("{v20}", RISCV::V20)
                        .Case("{v21}", RISCV::V21)
                        .Case("{v22}", RISCV::V22)
                        .Case("{v23}", RISCV::V23)
                        .Case("{v24}", RISCV::V24)
                        .Case("{v25}", RISCV::V25)
                        .Case("{v26}", RISCV::V26)
                        .Case("{v27}", RISCV::V27)
                        .Case("{v28}", RISCV::V28)
                        .Case("{v29}", RISCV::V29)
                        .Case("{v30}", RISCV::V30)
                        .Case("{v31}", RISCV::V31)
                        .Default(RISCV::NoRegister);
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);
        }
      }
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently, only length-1 constraints are supported.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
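    // 'A': an address that is held in a general-purpose register.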
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently, only length-1 constraints are supported.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'S':
      if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0)));
      } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                                BA->getValueType(0)));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

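// Emit the fence needed before a load or store: a seq_cst load gets a leading
// seq_cst fence, and a release (or stronger) store gets a leading release
// fence.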
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

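// Emit the fence needed after a load or store: an acquire (or stronger) load
// gets a trailing acquire fence; stores need no trailing fence.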
Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as
  // floating-point operations can't be used in an lr/sc sequence without
  // breaking the forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

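// Map an atomicrmw binary operation to the masked intrinsic implementing it
// for the given XLen.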
static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}

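// Emit a call to the masked atomicrmw intrinsic, which implements a part-word
// atomicrmw as an LR/SC loop on the aligned word containing the value.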
Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

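  // The intrinsic operates on XLen-sized values, so sign-extend the i32
  // arguments to i64 on RV64.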
  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

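// 8- and 16-bit cmpxchg have no native instruction and must be expanded into
// a masked word-sized LR/SC sequence.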
TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

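// Emit a call to the masked cmpxchg intrinsic, widening the operands to XLen
// on RV64 as for the masked atomicrmw intrinsics above.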
Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

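// Fused multiply-add is preferable whenever the relevant extension provides
// one: FMADD.H with Zfh, FMADD.S with F, and FMADD.D with D.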
bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

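// On RISC-V, the exception pointer and selector are held in a0 (X10) and
// a1 (X11) respectively.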
Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions if the libcall argument
  // or return value is an f32 under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

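// The RV64 psABI requires 32-bit scalars to be sign-extended to 64 bits when
// passed in registers, regardless of signedness.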
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

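// Return true if a multiply by the given constant should be decomposed into
// shifts and adds/subs rather than emitting a MUL instruction.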
bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit this optimization if the subtarget has the M extension and the
    // data size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL to a SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
      if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
          ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
           (Imm - 8).isPowerOf2()))
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::isMulAddWithConstProfitable(
    const SDValue &AddNode, const SDValue &ConstNode) const {
  // Let the DAGCombiner decide for vectors.
  EVT VT = AddNode.getValueType();
  if (VT.isVector())
    return true;

  // Let the DAGCombiner decide for larger types.
  if (VT.getScalarSizeInBits() > Subtarget.getXLen())
    return true;

  // The transformation is worse if c1 fits in simm12 (and can therefore fold
  // into an addi) while c1 * c2 does not.
  ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const APInt &C1 = C1Node->getAPIntValue();
  const APInt &C2 = C2Node->getAPIntValue();
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

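  // A misaligned vector access is supported (and treated as fast) as long as
  // it is at least aligned to the element type.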
  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

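// Split Val into register-sized parts, handling two RISC-V specific cases: an
// f16 passed in an f32 register is NaN-boxed, and a scalable vector narrower
// than its register class is inserted into an undef vector of the register
// type.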
bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // produce a NaN-boxed f32, and cast to f32.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types differ, bitcast Val to a vector with PartVT's
      // element type first.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

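// Reassemble a value from its register parts, inverting the transformations
// performed by splitValueIntoRegisterParts above.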
SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types differ, extract a subvector with PartVT's
      // element type, then bitcast it back to ValueVT below.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

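// Look up a register by its ABI or architectural name. Only registers that
// have been reserved (globally or by the user) may be obtained this way.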
Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm