1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
254     if (Subtarget.is64Bit()) {
255       setOperationAction(ISD::ROTL, MVT::i32, Custom);
256       setOperationAction(ISD::ROTR, MVT::i32, Custom);
257     }
258   } else {
259     setOperationAction(ISD::ROTL, XLenVT, Expand);
260     setOperationAction(ISD::ROTR, XLenVT, Expand);
261   }
262 
263   if (Subtarget.hasStdExtZbp()) {
264     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
265     // more combining.
266     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
267     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
268     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
269     // BSWAP i8 doesn't exist.
270     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
271     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
272 
273     if (Subtarget.is64Bit()) {
274       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
275       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
276     }
277   } else {
278     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
279     // pattern match it directly in isel.
280     setOperationAction(ISD::BSWAP, XLenVT,
281                        Subtarget.hasStdExtZbb() ? Legal : Expand);
282   }
283 
284   if (Subtarget.hasStdExtZbb()) {
285     setOperationAction(ISD::SMIN, XLenVT, Legal);
286     setOperationAction(ISD::SMAX, XLenVT, Legal);
287     setOperationAction(ISD::UMIN, XLenVT, Legal);
288     setOperationAction(ISD::UMAX, XLenVT, Legal);
289 
290     if (Subtarget.is64Bit()) {
291       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
292       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
293       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
294       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
295     }
296   } else {
297     setOperationAction(ISD::CTTZ, XLenVT, Expand);
298     setOperationAction(ISD::CTLZ, XLenVT, Expand);
299     setOperationAction(ISD::CTPOP, XLenVT, Expand);
300   }
301 
302   if (Subtarget.hasStdExtZbt()) {
303     setOperationAction(ISD::FSHL, XLenVT, Custom);
304     setOperationAction(ISD::FSHR, XLenVT, Custom);
305     setOperationAction(ISD::SELECT, XLenVT, Legal);
306 
307     if (Subtarget.is64Bit()) {
308       setOperationAction(ISD::FSHL, MVT::i32, Custom);
309       setOperationAction(ISD::FSHR, MVT::i32, Custom);
310     }
311   } else {
312     setOperationAction(ISD::SELECT, XLenVT, Custom);
313   }
314 
315   static const ISD::CondCode FPCCToExpand[] = {
316       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
317       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
318       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
319 
320   static const ISD::NodeType FPOpToExpand[] = {
321       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
322       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
323 
324   if (Subtarget.hasStdExtZfh())
325     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
326 
327   if (Subtarget.hasStdExtZfh()) {
328     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
329     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
330     setOperationAction(ISD::LRINT, MVT::f16, Legal);
331     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
332     setOperationAction(ISD::LROUND, MVT::f16, Legal);
333     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
334     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
335     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
336     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
337     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
338     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
339     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
345     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
346     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
348     for (auto CC : FPCCToExpand)
349       setCondCodeAction(CC, MVT::f16, Expand);
350     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
351     setOperationAction(ISD::SELECT, MVT::f16, Custom);
352     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
353 
354     setOperationAction(ISD::FREM,       MVT::f16, Promote);
355     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
356     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
357     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
358     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
359     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
360     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
361     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
362     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
363     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
364     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
365     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
366     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
367     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
368     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
369     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
370     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
371     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
372 
373     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
374     // complete support for all operations in LegalizeDAG.
375 
376     // We need to custom promote this.
377     if (Subtarget.is64Bit())
378       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
379   }
380 
381   if (Subtarget.hasStdExtF()) {
382     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
383     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
384     setOperationAction(ISD::LRINT, MVT::f32, Legal);
385     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
386     setOperationAction(ISD::LROUND, MVT::f32, Legal);
387     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
388     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
389     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
390     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
391     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
392     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
393     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
395     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
396     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
397     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
398     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
399     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
400     for (auto CC : FPCCToExpand)
401       setCondCodeAction(CC, MVT::f32, Expand);
402     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
403     setOperationAction(ISD::SELECT, MVT::f32, Custom);
404     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
405     for (auto Op : FPOpToExpand)
406       setOperationAction(Op, MVT::f32, Expand);
407     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
408     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
409   }
410 
411   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
412     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
413 
414   if (Subtarget.hasStdExtD()) {
415     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
416     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
417     setOperationAction(ISD::LRINT, MVT::f64, Legal);
418     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
419     setOperationAction(ISD::LROUND, MVT::f64, Legal);
420     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
421     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
422     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
423     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
424     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
425     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
426     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
428     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
429     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
431     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
432     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
433     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
435     for (auto CC : FPCCToExpand)
436       setCondCodeAction(CC, MVT::f64, Expand);
437     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
438     setOperationAction(ISD::SELECT, MVT::f64, Custom);
439     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
440     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
441     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
442     for (auto Op : FPOpToExpand)
443       setOperationAction(Op, MVT::f64, Expand);
444     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
445     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
446   }
447 
448   if (Subtarget.is64Bit()) {
449     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
450     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
451     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
452     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
453   }
454 
455   if (Subtarget.hasStdExtF()) {
456     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
457     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
458 
459     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
460     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
461     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
462     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
463 
464     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
465     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
466   }
467 
468   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
469   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
470   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
471   setOperationAction(ISD::JumpTable, XLenVT, Custom);
472 
473   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
474 
475   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
476   // Unfortunately this can't be determined just from the ISA naming string.
477   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
478                      Subtarget.is64Bit() ? Legal : Custom);
479 
480   setOperationAction(ISD::TRAP, MVT::Other, Legal);
481   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
482   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
483   if (Subtarget.is64Bit())
484     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
485 
486   if (Subtarget.hasStdExtA()) {
487     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
488     setMinCmpXchgSizeInBits(32);
489   } else {
490     setMaxAtomicSizeInBitsSupported(0);
491   }
492 
493   setBooleanContents(ZeroOrOneBooleanContent);
494 
495   if (Subtarget.hasVInstructions()) {
496     setBooleanVectorContents(ZeroOrOneBooleanContent);
497 
498     setOperationAction(ISD::VSCALE, XLenVT, Custom);
499 
500     // RVV intrinsics may have illegal operands.
501     // We also need to custom legalize vmv.x.s.
502     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
503     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
504     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
505     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
506     if (Subtarget.is64Bit()) {
507       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
508     } else {
509       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
510       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
511     }
512 
513     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
514     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
515 
516     static const unsigned IntegerVPOps[] = {
517         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
518         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
519         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
520         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
521         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
522         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
523         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
524         ISD::VP_SELECT};
525 
526     static const unsigned FloatingPointVPOps[] = {
527         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
528         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
529         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_SELECT};
530 
531     if (!Subtarget.is64Bit()) {
532       // We must custom-lower certain vXi64 operations on RV32 due to the vector
533       // element type being illegal.
534       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
535       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
536 
537       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
538       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
539       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
540       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
541       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
542       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
543       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
544       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
545 
546       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
547       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
548       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
549       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
550       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
551       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
552       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
553       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
554     }
555 
556     for (MVT VT : BoolVecVTs) {
557       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
558 
559       // Mask VTs are custom-expanded into a series of standard nodes
560       setOperationAction(ISD::TRUNCATE, VT, Custom);
561       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
562       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
563       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
564 
565       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
566       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
567 
568       setOperationAction(ISD::SELECT, VT, Custom);
569       setOperationAction(ISD::SELECT_CC, VT, Expand);
570       setOperationAction(ISD::VSELECT, VT, Expand);
571       setOperationAction(ISD::VP_SELECT, VT, Expand);
572 
573       setOperationAction(ISD::VP_AND, VT, Custom);
574       setOperationAction(ISD::VP_OR, VT, Custom);
575       setOperationAction(ISD::VP_XOR, VT, Custom);
576 
577       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
578       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
579       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
580 
581       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
582       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
583       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
584 
585       // RVV has native int->float & float->int conversions where the
586       // element type sizes are within one power-of-two of each other. Any
587       // wider distances between type sizes have to be lowered as sequences
588       // which progressively narrow the gap in stages.
589       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
590       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
591       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
592       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
593 
594       // Expand all extending loads to types larger than this, and truncating
595       // stores from types larger than this.
596       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
597         setTruncStoreAction(OtherVT, VT, Expand);
598         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
599         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
600         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
601       }
602     }
603 
604     for (MVT VT : IntVecVTs) {
605       if (VT.getVectorElementType() == MVT::i64 &&
606           !Subtarget.hasVInstructionsI64())
607         continue;
608 
609       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
610       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
611 
612       // Vectors implement MULHS/MULHU.
613       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
614       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
615 
616       setOperationAction(ISD::SMIN, VT, Legal);
617       setOperationAction(ISD::SMAX, VT, Legal);
618       setOperationAction(ISD::UMIN, VT, Legal);
619       setOperationAction(ISD::UMAX, VT, Legal);
620 
621       setOperationAction(ISD::ROTL, VT, Expand);
622       setOperationAction(ISD::ROTR, VT, Expand);
623 
624       setOperationAction(ISD::CTTZ, VT, Expand);
625       setOperationAction(ISD::CTLZ, VT, Expand);
626       setOperationAction(ISD::CTPOP, VT, Expand);
627 
628       setOperationAction(ISD::BSWAP, VT, Expand);
629 
630       // Custom-lower extensions and truncations from/to mask types.
631       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
632       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
633       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
634 
635       // RVV has native int->float & float->int conversions where the
636       // element type sizes are within one power-of-two of each other. Any
637       // wider distances between type sizes have to be lowered as sequences
638       // which progressively narrow the gap in stages.
639       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
640       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
641       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
642       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
643 
644       setOperationAction(ISD::SADDSAT, VT, Legal);
645       setOperationAction(ISD::UADDSAT, VT, Legal);
646       setOperationAction(ISD::SSUBSAT, VT, Legal);
647       setOperationAction(ISD::USUBSAT, VT, Legal);
648 
649       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
650       // nodes which truncate by one power of two at a time.
651       setOperationAction(ISD::TRUNCATE, VT, Custom);
652 
653       // Custom-lower insert/extract operations to simplify patterns.
654       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
655       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
656 
657       // Custom-lower reduction operations to set up the corresponding custom
658       // nodes' operands.
659       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
660       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
661       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
662       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
663       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
664       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
665       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
666       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
667 
668       for (unsigned VPOpc : IntegerVPOps)
669         setOperationAction(VPOpc, VT, Custom);
670 
671       setOperationAction(ISD::LOAD, VT, Custom);
672       setOperationAction(ISD::STORE, VT, Custom);
673 
674       setOperationAction(ISD::MLOAD, VT, Custom);
675       setOperationAction(ISD::MSTORE, VT, Custom);
676       setOperationAction(ISD::MGATHER, VT, Custom);
677       setOperationAction(ISD::MSCATTER, VT, Custom);
678 
679       setOperationAction(ISD::VP_LOAD, VT, Custom);
680       setOperationAction(ISD::VP_STORE, VT, Custom);
681       setOperationAction(ISD::VP_GATHER, VT, Custom);
682       setOperationAction(ISD::VP_SCATTER, VT, Custom);
683 
684       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
685       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
686       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
687 
688       setOperationAction(ISD::SELECT, VT, Custom);
689       setOperationAction(ISD::SELECT_CC, VT, Expand);
690 
691       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
692       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
693 
694       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
695         setTruncStoreAction(VT, OtherVT, Expand);
696         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
697         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
698         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
699       }
700 
701       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
702       // type that can represent the value exactly.
703       if (VT.getVectorElementType() != MVT::i64) {
704         MVT FloatEltVT =
705             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
706         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
707         if (isTypeLegal(FloatVT)) {
708           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
709           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
710         }
711       }
712     }
713 
714     // Expand various CCs to best match the RVV ISA, which natively supports UNE
715     // but no other unordered comparisons, and supports all ordered comparisons
716     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
717     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
718     // and we pattern-match those back to the "original", swapping operands once
719     // more. This way we catch both operations and both "vf" and "fv" forms with
720     // fewer patterns.
721     static const ISD::CondCode VFPCCToExpand[] = {
722         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
723         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
724         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
725     };
726 
727     // Sets common operation actions on RVV floating-point vector types.
728     const auto SetCommonVFPActions = [&](MVT VT) {
729       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
730       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
731       // sizes are within one power-of-two of each other. Therefore conversions
732       // between vXf16 and vXf64 must be lowered as sequences which convert via
733       // vXf32.
734       setOperationAction(ISD::FP_ROUND, VT, Custom);
735       setOperationAction(ISD::FP_EXTEND, VT, Custom);
736       // Custom-lower insert/extract operations to simplify patterns.
737       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
738       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
739       // Expand various condition codes (explained above).
740       for (auto CC : VFPCCToExpand)
741         setCondCodeAction(CC, VT, Expand);
742 
743       setOperationAction(ISD::FMINNUM, VT, Legal);
744       setOperationAction(ISD::FMAXNUM, VT, Legal);
745 
746       setOperationAction(ISD::FTRUNC, VT, Custom);
747       setOperationAction(ISD::FCEIL, VT, Custom);
748       setOperationAction(ISD::FFLOOR, VT, Custom);
749 
750       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
751       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
752       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
753       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
754 
755       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
756 
757       setOperationAction(ISD::LOAD, VT, Custom);
758       setOperationAction(ISD::STORE, VT, Custom);
759 
760       setOperationAction(ISD::MLOAD, VT, Custom);
761       setOperationAction(ISD::MSTORE, VT, Custom);
762       setOperationAction(ISD::MGATHER, VT, Custom);
763       setOperationAction(ISD::MSCATTER, VT, Custom);
764 
765       setOperationAction(ISD::VP_LOAD, VT, Custom);
766       setOperationAction(ISD::VP_STORE, VT, Custom);
767       setOperationAction(ISD::VP_GATHER, VT, Custom);
768       setOperationAction(ISD::VP_SCATTER, VT, Custom);
769 
770       setOperationAction(ISD::SELECT, VT, Custom);
771       setOperationAction(ISD::SELECT_CC, VT, Expand);
772 
773       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
774       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
775       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
776 
777       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
778 
779       for (unsigned VPOpc : FloatingPointVPOps)
780         setOperationAction(VPOpc, VT, Custom);
781     };
782 
783     // Sets common extload/truncstore actions on RVV floating-point vector
784     // types.
785     const auto SetCommonVFPExtLoadTruncStoreActions =
786         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
787           for (auto SmallVT : SmallerVTs) {
788             setTruncStoreAction(VT, SmallVT, Expand);
789             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
790           }
791         };
792 
793     if (Subtarget.hasVInstructionsF16())
794       for (MVT VT : F16VecVTs)
795         SetCommonVFPActions(VT);
796 
797     for (MVT VT : F32VecVTs) {
798       if (Subtarget.hasVInstructionsF32())
799         SetCommonVFPActions(VT);
800       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
801     }
802 
803     for (MVT VT : F64VecVTs) {
804       if (Subtarget.hasVInstructionsF64())
805         SetCommonVFPActions(VT);
806       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
807       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
808     }
809 
810     if (Subtarget.useRVVForFixedLengthVectors()) {
811       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
812         if (!useRVVForFixedLengthVectorVT(VT))
813           continue;
814 
815         // By default everything must be expanded.
816         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
817           setOperationAction(Op, VT, Expand);
818         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
819           setTruncStoreAction(VT, OtherVT, Expand);
820           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
821           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
822           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
823         }
824 
825         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
826         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
827         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
828 
829         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
830         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
831 
832         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
833         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
834 
835         setOperationAction(ISD::LOAD, VT, Custom);
836         setOperationAction(ISD::STORE, VT, Custom);
837 
838         setOperationAction(ISD::SETCC, VT, Custom);
839 
840         setOperationAction(ISD::SELECT, VT, Custom);
841 
842         setOperationAction(ISD::TRUNCATE, VT, Custom);
843 
844         setOperationAction(ISD::BITCAST, VT, Custom);
845 
846         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
847         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
848         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
849 
850         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
851         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
852         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
853 
854         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
855         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
856         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
857         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
858 
859         // Operations below are different for between masks and other vectors.
860         if (VT.getVectorElementType() == MVT::i1) {
861           setOperationAction(ISD::VP_AND, VT, Custom);
862           setOperationAction(ISD::VP_OR, VT, Custom);
863           setOperationAction(ISD::VP_XOR, VT, Custom);
864           setOperationAction(ISD::AND, VT, Custom);
865           setOperationAction(ISD::OR, VT, Custom);
866           setOperationAction(ISD::XOR, VT, Custom);
867           continue;
868         }
869 
870         // Use SPLAT_VECTOR to prevent type legalization from destroying the
871         // splats when type legalizing i64 scalar on RV32.
872         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
873         // improvements first.
874         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
875           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
876           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
877         }
878 
879         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
880         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
881 
882         setOperationAction(ISD::MLOAD, VT, Custom);
883         setOperationAction(ISD::MSTORE, VT, Custom);
884         setOperationAction(ISD::MGATHER, VT, Custom);
885         setOperationAction(ISD::MSCATTER, VT, Custom);
886 
887         setOperationAction(ISD::VP_LOAD, VT, Custom);
888         setOperationAction(ISD::VP_STORE, VT, Custom);
889         setOperationAction(ISD::VP_GATHER, VT, Custom);
890         setOperationAction(ISD::VP_SCATTER, VT, Custom);
891 
892         setOperationAction(ISD::ADD, VT, Custom);
893         setOperationAction(ISD::MUL, VT, Custom);
894         setOperationAction(ISD::SUB, VT, Custom);
895         setOperationAction(ISD::AND, VT, Custom);
896         setOperationAction(ISD::OR, VT, Custom);
897         setOperationAction(ISD::XOR, VT, Custom);
898         setOperationAction(ISD::SDIV, VT, Custom);
899         setOperationAction(ISD::SREM, VT, Custom);
900         setOperationAction(ISD::UDIV, VT, Custom);
901         setOperationAction(ISD::UREM, VT, Custom);
902         setOperationAction(ISD::SHL, VT, Custom);
903         setOperationAction(ISD::SRA, VT, Custom);
904         setOperationAction(ISD::SRL, VT, Custom);
905 
906         setOperationAction(ISD::SMIN, VT, Custom);
907         setOperationAction(ISD::SMAX, VT, Custom);
908         setOperationAction(ISD::UMIN, VT, Custom);
909         setOperationAction(ISD::UMAX, VT, Custom);
910         setOperationAction(ISD::ABS,  VT, Custom);
911 
912         setOperationAction(ISD::MULHS, VT, Custom);
913         setOperationAction(ISD::MULHU, VT, Custom);
914 
915         setOperationAction(ISD::SADDSAT, VT, Custom);
916         setOperationAction(ISD::UADDSAT, VT, Custom);
917         setOperationAction(ISD::SSUBSAT, VT, Custom);
918         setOperationAction(ISD::USUBSAT, VT, Custom);
919 
920         setOperationAction(ISD::VSELECT, VT, Custom);
921         setOperationAction(ISD::SELECT_CC, VT, Expand);
922 
923         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
924         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
925         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
926 
927         // Custom-lower reduction operations to set up the corresponding custom
928         // nodes' operands.
929         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
930         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
931         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
932         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
933         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
934 
935         for (unsigned VPOpc : IntegerVPOps)
936           setOperationAction(VPOpc, VT, Custom);
937 
938         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
939         // type that can represent the value exactly.
940         if (VT.getVectorElementType() != MVT::i64) {
941           MVT FloatEltVT =
942               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
943           EVT FloatVT =
944               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
945           if (isTypeLegal(FloatVT)) {
946             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
947             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
948           }
949         }
950       }
951 
952       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
953         if (!useRVVForFixedLengthVectorVT(VT))
954           continue;
955 
956         // By default everything must be expanded.
957         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
958           setOperationAction(Op, VT, Expand);
959         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
960           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
961           setTruncStoreAction(VT, OtherVT, Expand);
962         }
963 
964         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
965         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
966         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
967 
968         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
969         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
970         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
971         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
972         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
973 
974         setOperationAction(ISD::LOAD, VT, Custom);
975         setOperationAction(ISD::STORE, VT, Custom);
976         setOperationAction(ISD::MLOAD, VT, Custom);
977         setOperationAction(ISD::MSTORE, VT, Custom);
978         setOperationAction(ISD::MGATHER, VT, Custom);
979         setOperationAction(ISD::MSCATTER, VT, Custom);
980 
981         setOperationAction(ISD::VP_LOAD, VT, Custom);
982         setOperationAction(ISD::VP_STORE, VT, Custom);
983         setOperationAction(ISD::VP_GATHER, VT, Custom);
984         setOperationAction(ISD::VP_SCATTER, VT, Custom);
985 
986         setOperationAction(ISD::FADD, VT, Custom);
987         setOperationAction(ISD::FSUB, VT, Custom);
988         setOperationAction(ISD::FMUL, VT, Custom);
989         setOperationAction(ISD::FDIV, VT, Custom);
990         setOperationAction(ISD::FNEG, VT, Custom);
991         setOperationAction(ISD::FABS, VT, Custom);
992         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
993         setOperationAction(ISD::FSQRT, VT, Custom);
994         setOperationAction(ISD::FMA, VT, Custom);
995         setOperationAction(ISD::FMINNUM, VT, Custom);
996         setOperationAction(ISD::FMAXNUM, VT, Custom);
997 
998         setOperationAction(ISD::FP_ROUND, VT, Custom);
999         setOperationAction(ISD::FP_EXTEND, VT, Custom);
1000 
1001         setOperationAction(ISD::FTRUNC, VT, Custom);
1002         setOperationAction(ISD::FCEIL, VT, Custom);
1003         setOperationAction(ISD::FFLOOR, VT, Custom);
1004 
1005         for (auto CC : VFPCCToExpand)
1006           setCondCodeAction(CC, VT, Expand);
1007 
1008         setOperationAction(ISD::VSELECT, VT, Custom);
1009         setOperationAction(ISD::SELECT, VT, Custom);
1010         setOperationAction(ISD::SELECT_CC, VT, Expand);
1011 
1012         setOperationAction(ISD::BITCAST, VT, Custom);
1013 
1014         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1015         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1016         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1017         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1018 
1019         for (unsigned VPOpc : FloatingPointVPOps)
1020           setOperationAction(VPOpc, VT, Custom);
1021       }
1022 
1023       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1024       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1025       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1026       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1027       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1028       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1029       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1030       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1031     }
1032   }
1033 
1034   // Function alignments.
1035   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1036   setMinFunctionAlignment(FunctionAlignment);
1037   setPrefFunctionAlignment(FunctionAlignment);
1038 
1039   setMinimumJumpTableEntries(5);
1040 
1041   // Jumps are expensive, compared to logic
1042   setJumpIsExpensive();
1043 
1044   setTargetDAGCombine(ISD::ADD);
1045   setTargetDAGCombine(ISD::SUB);
1046   setTargetDAGCombine(ISD::AND);
1047   setTargetDAGCombine(ISD::OR);
1048   setTargetDAGCombine(ISD::XOR);
1049   setTargetDAGCombine(ISD::ANY_EXTEND);
1050   if (Subtarget.hasStdExtF()) {
1051     setTargetDAGCombine(ISD::ZERO_EXTEND);
1052     setTargetDAGCombine(ISD::FP_TO_SINT);
1053     setTargetDAGCombine(ISD::FP_TO_UINT);
1054     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1055     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1056   }
1057   if (Subtarget.hasVInstructions()) {
1058     setTargetDAGCombine(ISD::FCOPYSIGN);
1059     setTargetDAGCombine(ISD::MGATHER);
1060     setTargetDAGCombine(ISD::MSCATTER);
1061     setTargetDAGCombine(ISD::VP_GATHER);
1062     setTargetDAGCombine(ISD::VP_SCATTER);
1063     setTargetDAGCombine(ISD::SRA);
1064     setTargetDAGCombine(ISD::SRL);
1065     setTargetDAGCombine(ISD::SHL);
1066     setTargetDAGCombine(ISD::STORE);
1067   }
1068 }
1069 
1070 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1071                                             LLVMContext &Context,
1072                                             EVT VT) const {
1073   if (!VT.isVector())
1074     return getPointerTy(DL);
1075   if (Subtarget.hasVInstructions() &&
1076       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1077     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1078   return VT.changeVectorElementTypeToInteger();
1079 }
1080 
1081 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1082   return Subtarget.getXLenVT();
1083 }
1084 
1085 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1086                                              const CallInst &I,
1087                                              MachineFunction &MF,
1088                                              unsigned Intrinsic) const {
1089   auto &DL = I.getModule()->getDataLayout();
1090   switch (Intrinsic) {
1091   default:
1092     return false;
1093   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1094   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1095   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1096   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1097   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1098   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1099   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1100   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1101   case Intrinsic::riscv_masked_cmpxchg_i32: {
1102     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
1103     Info.opc = ISD::INTRINSIC_W_CHAIN;
1104     Info.memVT = MVT::getVT(PtrTy->getElementType());
1105     Info.ptrVal = I.getArgOperand(0);
1106     Info.offset = 0;
1107     Info.align = Align(4);
1108     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1109                  MachineMemOperand::MOVolatile;
1110     return true;
1111   }
1112   case Intrinsic::riscv_masked_strided_load:
1113     Info.opc = ISD::INTRINSIC_W_CHAIN;
1114     Info.ptrVal = I.getArgOperand(1);
1115     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1116     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1117     Info.size = MemoryLocation::UnknownSize;
1118     Info.flags |= MachineMemOperand::MOLoad;
1119     return true;
1120   case Intrinsic::riscv_masked_strided_store:
1121     Info.opc = ISD::INTRINSIC_VOID;
1122     Info.ptrVal = I.getArgOperand(1);
1123     Info.memVT =
1124         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1125     Info.align = Align(
1126         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1127         8);
1128     Info.size = MemoryLocation::UnknownSize;
1129     Info.flags |= MachineMemOperand::MOStore;
1130     return true;
1131   }
1132 }
1133 
1134 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1135                                                 const AddrMode &AM, Type *Ty,
1136                                                 unsigned AS,
1137                                                 Instruction *I) const {
1138   // No global is ever allowed as a base.
1139   if (AM.BaseGV)
1140     return false;
1141 
1142   // Require a 12-bit signed offset.
1143   if (!isInt<12>(AM.BaseOffs))
1144     return false;
1145 
1146   switch (AM.Scale) {
1147   case 0: // "r+i" or just "i", depending on HasBaseReg.
1148     break;
1149   case 1:
1150     if (!AM.HasBaseReg) // allow "r+i".
1151       break;
1152     return false; // disallow "r+r" or "r+r+i".
1153   default:
1154     return false;
1155   }
1156 
1157   return true;
1158 }
1159 
1160 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1161   return isInt<12>(Imm);
1162 }
1163 
1164 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1165   return isInt<12>(Imm);
1166 }
1167 
1168 // On RV32, 64-bit integers are split into their high and low parts and held
1169 // in two different registers, so the trunc is free since the low register can
1170 // just be used.
1171 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1172   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1173     return false;
1174   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1175   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1176   return (SrcBits == 64 && DestBits == 32);
1177 }
1178 
1179 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1180   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1181       !SrcVT.isInteger() || !DstVT.isInteger())
1182     return false;
1183   unsigned SrcBits = SrcVT.getSizeInBits();
1184   unsigned DestBits = DstVT.getSizeInBits();
1185   return (SrcBits == 64 && DestBits == 32);
1186 }
1187 
1188 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1189   // Zexts are free if they can be combined with a load.
1190   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1191   // poorly with type legalization of compares preferring sext.
1192   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1193     EVT MemVT = LD->getMemoryVT();
1194     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1195         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1196          LD->getExtensionType() == ISD::ZEXTLOAD))
1197       return true;
1198   }
1199 
1200   return TargetLowering::isZExtFree(Val, VT2);
1201 }
1202 
1203 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1204   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1205 }
1206 
1207 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1208   return Subtarget.hasStdExtZbb();
1209 }
1210 
1211 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1212   return Subtarget.hasStdExtZbb();
1213 }
1214 
1215 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1216   EVT VT = Y.getValueType();
1217 
1218   // FIXME: Support vectors once we have tests.
1219   if (VT.isVector())
1220     return false;
1221 
1222   return Subtarget.hasStdExtZbb() && !isa<ConstantSDNode>(Y);
1223 }
1224 
1225 /// Check if sinking \p I's operands to I's basic block is profitable, because
1226 /// the operands can be folded into a target instruction, e.g.
1227 /// splats of scalars can fold into vector instructions.
1228 bool RISCVTargetLowering::shouldSinkOperands(
1229     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1230   using namespace llvm::PatternMatch;
1231 
1232   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1233     return false;
1234 
1235   auto IsSinker = [&](Instruction *I, int Operand) {
1236     switch (I->getOpcode()) {
1237     case Instruction::Add:
1238     case Instruction::Sub:
1239     case Instruction::Mul:
1240     case Instruction::And:
1241     case Instruction::Or:
1242     case Instruction::Xor:
1243     case Instruction::FAdd:
1244     case Instruction::FSub:
1245     case Instruction::FMul:
1246     case Instruction::FDiv:
1247     case Instruction::ICmp:
1248     case Instruction::FCmp:
1249       return true;
1250     case Instruction::Shl:
1251     case Instruction::LShr:
1252     case Instruction::AShr:
1253     case Instruction::UDiv:
1254     case Instruction::SDiv:
1255     case Instruction::URem:
1256     case Instruction::SRem:
1257       return Operand == 1;
1258     case Instruction::Call:
1259       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1260         switch (II->getIntrinsicID()) {
1261         case Intrinsic::fma:
1262           return Operand == 0 || Operand == 1;
1263         default:
1264           return false;
1265         }
1266       }
1267       return false;
1268     default:
1269       return false;
1270     }
1271   };
1272 
1273   for (auto OpIdx : enumerate(I->operands())) {
1274     if (!IsSinker(I, OpIdx.index()))
1275       continue;
1276 
1277     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1278     // Make sure we are not already sinking this operand
1279     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1280       continue;
1281 
1282     // We are looking for a splat that can be sunk.
1283     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1284                              m_Undef(), m_ZeroMask())))
1285       continue;
1286 
1287     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1288     // and vector registers
1289     for (Use &U : Op->uses()) {
1290       Instruction *Insn = cast<Instruction>(U.getUser());
1291       if (!IsSinker(Insn, U.getOperandNo()))
1292         return false;
1293     }
1294 
1295     Ops.push_back(&Op->getOperandUse(0));
1296     Ops.push_back(&OpIdx.value());
1297   }
1298   return true;
1299 }
1300 
1301 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1302                                        bool ForCodeSize) const {
1303   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1304   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1305     return false;
1306   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1307     return false;
1308   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1309     return false;
1310   return Imm.isZero();
1311 }
1312 
1313 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1314   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1315          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1316          (VT == MVT::f64 && Subtarget.hasStdExtD());
1317 }
1318 
1319 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1320                                                       CallingConv::ID CC,
1321                                                       EVT VT) const {
1322   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1323   // We might still end up using a GPR but that will be decided based on ABI.
1324   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1325   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1326     return MVT::f32;
1327 
1328   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1329 }
1330 
1331 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1332                                                            CallingConv::ID CC,
1333                                                            EVT VT) const {
1334   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1335   // We might still end up using a GPR but that will be decided based on ABI.
1336   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1337   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1338     return 1;
1339 
1340   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1341 }
1342 
1343 // Changes the condition code and swaps operands if necessary, so the SetCC
1344 // operation matches one of the comparisons supported directly by branches
1345 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1346 // with 1/-1.
1347 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1348                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1349   // Convert X > -1 to X >= 0.
1350   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1351     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1352     CC = ISD::SETGE;
1353     return;
1354   }
1355   // Convert X < 1 to 0 >= X.
1356   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1357     RHS = LHS;
1358     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1359     CC = ISD::SETGE;
1360     return;
1361   }
1362 
1363   switch (CC) {
1364   default:
1365     break;
1366   case ISD::SETGT:
1367   case ISD::SETLE:
1368   case ISD::SETUGT:
1369   case ISD::SETULE:
1370     CC = ISD::getSetCCSwappedOperands(CC);
1371     std::swap(LHS, RHS);
1372     break;
1373   }
1374 }
1375 
1376 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1377   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1378   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1379   if (VT.getVectorElementType() == MVT::i1)
1380     KnownSize *= 8;
1381 
1382   switch (KnownSize) {
1383   default:
1384     llvm_unreachable("Invalid LMUL.");
1385   case 8:
1386     return RISCVII::VLMUL::LMUL_F8;
1387   case 16:
1388     return RISCVII::VLMUL::LMUL_F4;
1389   case 32:
1390     return RISCVII::VLMUL::LMUL_F2;
1391   case 64:
1392     return RISCVII::VLMUL::LMUL_1;
1393   case 128:
1394     return RISCVII::VLMUL::LMUL_2;
1395   case 256:
1396     return RISCVII::VLMUL::LMUL_4;
1397   case 512:
1398     return RISCVII::VLMUL::LMUL_8;
1399   }
1400 }
1401 
1402 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1403   switch (LMul) {
1404   default:
1405     llvm_unreachable("Invalid LMUL.");
1406   case RISCVII::VLMUL::LMUL_F8:
1407   case RISCVII::VLMUL::LMUL_F4:
1408   case RISCVII::VLMUL::LMUL_F2:
1409   case RISCVII::VLMUL::LMUL_1:
1410     return RISCV::VRRegClassID;
1411   case RISCVII::VLMUL::LMUL_2:
1412     return RISCV::VRM2RegClassID;
1413   case RISCVII::VLMUL::LMUL_4:
1414     return RISCV::VRM4RegClassID;
1415   case RISCVII::VLMUL::LMUL_8:
1416     return RISCV::VRM8RegClassID;
1417   }
1418 }
1419 
1420 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1421   RISCVII::VLMUL LMUL = getLMUL(VT);
1422   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1423       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1424       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1425       LMUL == RISCVII::VLMUL::LMUL_1) {
1426     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1427                   "Unexpected subreg numbering");
1428     return RISCV::sub_vrm1_0 + Index;
1429   }
1430   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1431     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1432                   "Unexpected subreg numbering");
1433     return RISCV::sub_vrm2_0 + Index;
1434   }
1435   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1436     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1437                   "Unexpected subreg numbering");
1438     return RISCV::sub_vrm4_0 + Index;
1439   }
1440   llvm_unreachable("Invalid vector type.");
1441 }
1442 
1443 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1444   if (VT.getVectorElementType() == MVT::i1)
1445     return RISCV::VRRegClassID;
1446   return getRegClassIDForLMUL(getLMUL(VT));
1447 }
1448 
1449 // Attempt to decompose a subvector insert/extract between VecVT and
1450 // SubVecVT via subregister indices. Returns the subregister index that
1451 // can perform the subvector insert/extract with the given element index, as
1452 // well as the index corresponding to any leftover subvectors that must be
1453 // further inserted/extracted within the register class for SubVecVT.
1454 std::pair<unsigned, unsigned>
1455 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1456     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1457     const RISCVRegisterInfo *TRI) {
1458   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1459                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1460                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1461                 "Register classes not ordered");
1462   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1463   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1464   // Try to compose a subregister index that takes us from the incoming
1465   // LMUL>1 register class down to the outgoing one. At each step we half
1466   // the LMUL:
1467   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1468   // Note that this is not guaranteed to find a subregister index, such as
1469   // when we are extracting from one VR type to another.
1470   unsigned SubRegIdx = RISCV::NoSubRegister;
1471   for (const unsigned RCID :
1472        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1473     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1474       VecVT = VecVT.getHalfNumVectorElementsVT();
1475       bool IsHi =
1476           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1477       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1478                                             getSubregIndexByMVT(VecVT, IsHi));
1479       if (IsHi)
1480         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1481     }
1482   return {SubRegIdx, InsertExtractIdx};
1483 }
1484 
1485 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1486 // stores for those types.
1487 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1488   return !Subtarget.useRVVForFixedLengthVectors() ||
1489          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1490 }
1491 
1492 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1493   if (ScalarTy->isPointerTy())
1494     return true;
1495 
1496   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1497       ScalarTy->isIntegerTy(32))
1498     return true;
1499 
1500   if (ScalarTy->isIntegerTy(64))
1501     return Subtarget.hasVInstructionsI64();
1502 
1503   if (ScalarTy->isHalfTy())
1504     return Subtarget.hasVInstructionsF16();
1505   if (ScalarTy->isFloatTy())
1506     return Subtarget.hasVInstructionsF32();
1507   if (ScalarTy->isDoubleTy())
1508     return Subtarget.hasVInstructionsF64();
1509 
1510   return false;
1511 }
1512 
1513 static bool useRVVForFixedLengthVectorVT(MVT VT,
1514                                          const RISCVSubtarget &Subtarget) {
1515   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1516   if (!Subtarget.useRVVForFixedLengthVectors())
1517     return false;
1518 
1519   // We only support a set of vector types with a consistent maximum fixed size
1520   // across all supported vector element types to avoid legalization issues.
1521   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1522   // fixed-length vector type we support is 1024 bytes.
1523   if (VT.getFixedSizeInBits() > 1024 * 8)
1524     return false;
1525 
1526   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1527 
1528   MVT EltVT = VT.getVectorElementType();
1529 
1530   // Don't use RVV for vectors we cannot scalarize if required.
1531   switch (EltVT.SimpleTy) {
1532   // i1 is supported but has different rules.
1533   default:
1534     return false;
1535   case MVT::i1:
1536     // Masks can only use a single register.
1537     if (VT.getVectorNumElements() > MinVLen)
1538       return false;
1539     MinVLen /= 8;
1540     break;
1541   case MVT::i8:
1542   case MVT::i16:
1543   case MVT::i32:
1544     break;
1545   case MVT::i64:
1546     if (!Subtarget.hasVInstructionsI64())
1547       return false;
1548     break;
1549   case MVT::f16:
1550     if (!Subtarget.hasVInstructionsF16())
1551       return false;
1552     break;
1553   case MVT::f32:
1554     if (!Subtarget.hasVInstructionsF32())
1555       return false;
1556     break;
1557   case MVT::f64:
1558     if (!Subtarget.hasVInstructionsF64())
1559       return false;
1560     break;
1561   }
1562 
1563   // Reject elements larger than ELEN.
1564   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1565     return false;
1566 
1567   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1568   // Don't use RVV for types that don't fit.
1569   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1570     return false;
1571 
1572   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1573   // the base fixed length RVV support in place.
1574   if (!VT.isPow2VectorType())
1575     return false;
1576 
1577   return true;
1578 }
1579 
1580 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1581   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1582 }
1583 
1584 // Return the largest legal scalable vector type that matches VT's element type.
1585 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1586                                             const RISCVSubtarget &Subtarget) {
1587   // This may be called before legal types are setup.
1588   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1589           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1590          "Expected legal fixed length vector!");
1591 
1592   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1593   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1594 
1595   MVT EltVT = VT.getVectorElementType();
1596   switch (EltVT.SimpleTy) {
1597   default:
1598     llvm_unreachable("unexpected element type for RVV container");
1599   case MVT::i1:
1600   case MVT::i8:
1601   case MVT::i16:
1602   case MVT::i32:
1603   case MVT::i64:
1604   case MVT::f16:
1605   case MVT::f32:
1606   case MVT::f64: {
1607     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1608     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1609     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1610     unsigned NumElts =
1611         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1612     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1613     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1614     return MVT::getScalableVectorVT(EltVT, NumElts);
1615   }
1616   }
1617 }
1618 
1619 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1620                                             const RISCVSubtarget &Subtarget) {
1621   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1622                                           Subtarget);
1623 }
1624 
1625 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1626   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1627 }
1628 
1629 // Grow V to consume an entire RVV register.
1630 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1631                                        const RISCVSubtarget &Subtarget) {
1632   assert(VT.isScalableVector() &&
1633          "Expected to convert into a scalable vector!");
1634   assert(V.getValueType().isFixedLengthVector() &&
1635          "Expected a fixed length vector operand!");
1636   SDLoc DL(V);
1637   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1638   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1639 }
1640 
1641 // Shrink V so it's just big enough to maintain a VT's worth of data.
1642 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1643                                          const RISCVSubtarget &Subtarget) {
1644   assert(VT.isFixedLengthVector() &&
1645          "Expected to convert into a fixed length vector!");
1646   assert(V.getValueType().isScalableVector() &&
1647          "Expected a scalable vector operand!");
1648   SDLoc DL(V);
1649   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1650   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1651 }
1652 
1653 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1654 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1655 // the vector type that it is contained in.
1656 static std::pair<SDValue, SDValue>
1657 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1658                 const RISCVSubtarget &Subtarget) {
1659   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1660   MVT XLenVT = Subtarget.getXLenVT();
1661   SDValue VL = VecVT.isFixedLengthVector()
1662                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1663                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1664   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1665   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1666   return {Mask, VL};
1667 }
1668 
1669 // As above but assuming the given type is a scalable vector type.
1670 static std::pair<SDValue, SDValue>
1671 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1672                         const RISCVSubtarget &Subtarget) {
1673   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1674   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1675 }
1676 
1677 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1678 // of either is (currently) supported. This can get us into an infinite loop
1679 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1680 // as a ..., etc.
1681 // Until either (or both) of these can reliably lower any node, reporting that
1682 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1683 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1684 // which is not desirable.
1685 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1686     EVT VT, unsigned DefinedValues) const {
1687   return false;
1688 }
1689 
1690 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1691   // Only splats are currently supported.
1692   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1693     return true;
1694 
1695   return false;
1696 }
1697 
1698 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1699                                   const RISCVSubtarget &Subtarget) {
1700   // RISCV FP-to-int conversions saturate to the destination register size, but
1701   // don't produce 0 for nan. We can use a conversion instruction and fix the
1702   // nan case with a compare and a select.
1703   SDValue Src = Op.getOperand(0);
1704 
1705   EVT DstVT = Op.getValueType();
1706   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1707 
1708   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1709   unsigned Opc;
1710   if (SatVT == DstVT)
1711     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1712   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1713     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1714   else
1715     return SDValue();
1716   // FIXME: Support other SatVTs by clamping before or after the conversion.
1717 
1718   SDLoc DL(Op);
1719   SDValue FpToInt = DAG.getNode(
1720       Opc, DL, DstVT, Src,
1721       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1722 
1723   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1724   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1725 }
1726 
1727 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1728 // and back. Taking care to avoid converting values that are nan or already
1729 // correct.
1730 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1731 // have FRM dependencies modeled yet.
1732 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1733   MVT VT = Op.getSimpleValueType();
1734   assert(VT.isVector() && "Unexpected type");
1735 
1736   SDLoc DL(Op);
1737 
1738   // Freeze the source since we are increasing the number of uses.
1739   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1740 
1741   // Truncate to integer and convert back to FP.
1742   MVT IntVT = VT.changeVectorElementTypeToInteger();
1743   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1744   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1745 
1746   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1747 
1748   if (Op.getOpcode() == ISD::FCEIL) {
1749     // If the truncated value is the greater than or equal to the original
1750     // value, we've computed the ceil. Otherwise, we went the wrong way and
1751     // need to increase by 1.
1752     // FIXME: This should use a masked operation. Handle here or in isel?
1753     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1754                                  DAG.getConstantFP(1.0, DL, VT));
1755     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1756     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1757   } else if (Op.getOpcode() == ISD::FFLOOR) {
1758     // If the truncated value is the less than or equal to the original value,
1759     // we've computed the floor. Otherwise, we went the wrong way and need to
1760     // decrease by 1.
1761     // FIXME: This should use a masked operation. Handle here or in isel?
1762     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1763                                  DAG.getConstantFP(1.0, DL, VT));
1764     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1765     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1766   }
1767 
1768   // Restore the original sign so that -0.0 is preserved.
1769   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1770 
1771   // Determine the largest integer that can be represented exactly. This and
1772   // values larger than it don't have any fractional bits so don't need to
1773   // be converted.
1774   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1775   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1776   APFloat MaxVal = APFloat(FltSem);
1777   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1778                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1779   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1780 
1781   // If abs(Src) was larger than MaxVal or nan, keep it.
1782   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1783   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1784   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1785 }
1786 
1787 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1788                                  const RISCVSubtarget &Subtarget) {
1789   MVT VT = Op.getSimpleValueType();
1790   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1791 
1792   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1793 
1794   SDLoc DL(Op);
1795   SDValue Mask, VL;
1796   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1797 
1798   unsigned Opc =
1799       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1800   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1801   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1802 }
1803 
1804 struct VIDSequence {
1805   int64_t StepNumerator;
1806   unsigned StepDenominator;
1807   int64_t Addend;
1808 };
1809 
1810 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1811 // to the (non-zero) step S and start value X. This can be then lowered as the
1812 // RVV sequence (VID * S) + X, for example.
1813 // The step S is represented as an integer numerator divided by a positive
1814 // denominator. Note that the implementation currently only identifies
1815 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1816 // cannot detect 2/3, for example.
1817 // Note that this method will also match potentially unappealing index
1818 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1819 // determine whether this is worth generating code for.
1820 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1821   unsigned NumElts = Op.getNumOperands();
1822   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1823   if (!Op.getValueType().isInteger())
1824     return None;
1825 
1826   Optional<unsigned> SeqStepDenom;
1827   Optional<int64_t> SeqStepNum, SeqAddend;
1828   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1829   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1830   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1831     // Assume undef elements match the sequence; we just have to be careful
1832     // when interpolating across them.
1833     if (Op.getOperand(Idx).isUndef())
1834       continue;
1835     // The BUILD_VECTOR must be all constants.
1836     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1837       return None;
1838 
1839     uint64_t Val = Op.getConstantOperandVal(Idx) &
1840                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1841 
1842     if (PrevElt) {
1843       // Calculate the step since the last non-undef element, and ensure
1844       // it's consistent across the entire sequence.
1845       unsigned IdxDiff = Idx - PrevElt->second;
1846       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1847 
1848       // A zero-value value difference means that we're somewhere in the middle
1849       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1850       // step change before evaluating the sequence.
1851       if (ValDiff != 0) {
1852         int64_t Remainder = ValDiff % IdxDiff;
1853         // Normalize the step if it's greater than 1.
1854         if (Remainder != ValDiff) {
1855           // The difference must cleanly divide the element span.
1856           if (Remainder != 0)
1857             return None;
1858           ValDiff /= IdxDiff;
1859           IdxDiff = 1;
1860         }
1861 
1862         if (!SeqStepNum)
1863           SeqStepNum = ValDiff;
1864         else if (ValDiff != SeqStepNum)
1865           return None;
1866 
1867         if (!SeqStepDenom)
1868           SeqStepDenom = IdxDiff;
1869         else if (IdxDiff != *SeqStepDenom)
1870           return None;
1871       }
1872     }
1873 
1874     // Record and/or check any addend.
1875     if (SeqStepNum && SeqStepDenom) {
1876       uint64_t ExpectedVal =
1877           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1878       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1879       if (!SeqAddend)
1880         SeqAddend = Addend;
1881       else if (SeqAddend != Addend)
1882         return None;
1883     }
1884 
1885     // Record this non-undef element for later.
1886     if (!PrevElt || PrevElt->first != Val)
1887       PrevElt = std::make_pair(Val, Idx);
1888   }
1889   // We need to have logged both a step and an addend for this to count as
1890   // a legal index sequence.
1891   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1892     return None;
1893 
1894   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1895 }
1896 
1897 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1898                                  const RISCVSubtarget &Subtarget) {
1899   MVT VT = Op.getSimpleValueType();
1900   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1901 
1902   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1903 
1904   SDLoc DL(Op);
1905   SDValue Mask, VL;
1906   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1907 
1908   MVT XLenVT = Subtarget.getXLenVT();
1909   unsigned NumElts = Op.getNumOperands();
1910 
1911   if (VT.getVectorElementType() == MVT::i1) {
1912     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1913       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1914       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1915     }
1916 
1917     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1918       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1919       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1920     }
1921 
1922     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1923     // scalar integer chunks whose bit-width depends on the number of mask
1924     // bits and XLEN.
1925     // First, determine the most appropriate scalar integer type to use. This
1926     // is at most XLenVT, but may be shrunk to a smaller vector element type
1927     // according to the size of the final vector - use i8 chunks rather than
1928     // XLenVT if we're producing a v8i1. This results in more consistent
1929     // codegen across RV32 and RV64.
1930     unsigned NumViaIntegerBits =
1931         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1932     NumViaIntegerBits = std::min(NumViaIntegerBits,
1933                                  Subtarget.getMaxELENForFixedLengthVectors());
1934     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1935       // If we have to use more than one INSERT_VECTOR_ELT then this
1936       // optimization is likely to increase code size; avoid peforming it in
1937       // such a case. We can use a load from a constant pool in this case.
1938       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1939         return SDValue();
1940       // Now we can create our integer vector type. Note that it may be larger
1941       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1942       MVT IntegerViaVecVT =
1943           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1944                            divideCeil(NumElts, NumViaIntegerBits));
1945 
1946       uint64_t Bits = 0;
1947       unsigned BitPos = 0, IntegerEltIdx = 0;
1948       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1949 
1950       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1951         // Once we accumulate enough bits to fill our scalar type, insert into
1952         // our vector and clear our accumulated data.
1953         if (I != 0 && I % NumViaIntegerBits == 0) {
1954           if (NumViaIntegerBits <= 32)
1955             Bits = SignExtend64(Bits, 32);
1956           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1957           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1958                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1959           Bits = 0;
1960           BitPos = 0;
1961           IntegerEltIdx++;
1962         }
1963         SDValue V = Op.getOperand(I);
1964         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1965         Bits |= ((uint64_t)BitValue << BitPos);
1966       }
1967 
1968       // Insert the (remaining) scalar value into position in our integer
1969       // vector type.
1970       if (NumViaIntegerBits <= 32)
1971         Bits = SignExtend64(Bits, 32);
1972       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1973       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1974                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1975 
1976       if (NumElts < NumViaIntegerBits) {
1977         // If we're producing a smaller vector than our minimum legal integer
1978         // type, bitcast to the equivalent (known-legal) mask type, and extract
1979         // our final mask.
1980         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1981         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1982         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1983                           DAG.getConstant(0, DL, XLenVT));
1984       } else {
1985         // Else we must have produced an integer type with the same size as the
1986         // mask type; bitcast for the final result.
1987         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1988         Vec = DAG.getBitcast(VT, Vec);
1989       }
1990 
1991       return Vec;
1992     }
1993 
1994     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1995     // vector type, we have a legal equivalently-sized i8 type, so we can use
1996     // that.
1997     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1998     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1999 
2000     SDValue WideVec;
2001     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2002       // For a splat, perform a scalar truncate before creating the wider
2003       // vector.
2004       assert(Splat.getValueType() == XLenVT &&
2005              "Unexpected type for i1 splat value");
2006       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2007                           DAG.getConstant(1, DL, XLenVT));
2008       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2009     } else {
2010       SmallVector<SDValue, 8> Ops(Op->op_values());
2011       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2012       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2013       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2014     }
2015 
2016     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2017   }
2018 
2019   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2020     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2021                                         : RISCVISD::VMV_V_X_VL;
2022     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
2023     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2024   }
2025 
2026   // Try and match index sequences, which we can lower to the vid instruction
2027   // with optional modifications. An all-undef vector is matched by
2028   // getSplatValue, above.
2029   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2030     int64_t StepNumerator = SimpleVID->StepNumerator;
2031     unsigned StepDenominator = SimpleVID->StepDenominator;
2032     int64_t Addend = SimpleVID->Addend;
2033 
2034     assert(StepNumerator != 0 && "Invalid step");
2035     bool Negate = false;
2036     int64_t SplatStepVal = StepNumerator;
2037     unsigned StepOpcode = ISD::MUL;
2038     if (StepNumerator != 1) {
2039       if (isPowerOf2_64(std::abs(StepNumerator))) {
2040         Negate = StepNumerator < 0;
2041         StepOpcode = ISD::SHL;
2042         SplatStepVal = Log2_64(std::abs(StepNumerator));
2043       }
2044     }
2045 
2046     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2047     // threshold since it's the immediate value many RVV instructions accept.
2048     // There is no vmul.vi instruction so ensure multiply constant can fit in
2049     // a single addi instruction.
2050     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2051          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2052         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2053       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2054       // Convert right out of the scalable type so we can use standard ISD
2055       // nodes for the rest of the computation. If we used scalable types with
2056       // these, we'd lose the fixed-length vector info and generate worse
2057       // vsetvli code.
2058       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2059       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2060           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2061         SDValue SplatStep = DAG.getSplatVector(
2062             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2063         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2064       }
2065       if (StepDenominator != 1) {
2066         SDValue SplatStep = DAG.getSplatVector(
2067             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2068         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2069       }
2070       if (Addend != 0 || Negate) {
2071         SDValue SplatAddend =
2072             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2073         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2074       }
2075       return VID;
2076     }
2077   }
2078 
2079   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2080   // when re-interpreted as a vector with a larger element type. For example,
2081   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2082   // could be instead splat as
2083   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2084   // TODO: This optimization could also work on non-constant splats, but it
2085   // would require bit-manipulation instructions to construct the splat value.
2086   SmallVector<SDValue> Sequence;
2087   unsigned EltBitSize = VT.getScalarSizeInBits();
2088   const auto *BV = cast<BuildVectorSDNode>(Op);
2089   if (VT.isInteger() && EltBitSize < 64 &&
2090       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2091       BV->getRepeatedSequence(Sequence) &&
2092       (Sequence.size() * EltBitSize) <= 64) {
2093     unsigned SeqLen = Sequence.size();
2094     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2095     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2096     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2097             ViaIntVT == MVT::i64) &&
2098            "Unexpected sequence type");
2099 
2100     unsigned EltIdx = 0;
2101     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2102     uint64_t SplatValue = 0;
2103     // Construct the amalgamated value which can be splatted as this larger
2104     // vector type.
2105     for (const auto &SeqV : Sequence) {
2106       if (!SeqV.isUndef())
2107         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2108                        << (EltIdx * EltBitSize));
2109       EltIdx++;
2110     }
2111 
2112     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2113     // achieve better constant materializion.
2114     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2115       SplatValue = SignExtend64(SplatValue, 32);
2116 
2117     // Since we can't introduce illegal i64 types at this stage, we can only
2118     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2119     // way we can use RVV instructions to splat.
2120     assert((ViaIntVT.bitsLE(XLenVT) ||
2121             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2122            "Unexpected bitcast sequence");
2123     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2124       SDValue ViaVL =
2125           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2126       MVT ViaContainerVT =
2127           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2128       SDValue Splat =
2129           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2130                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2131       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2132       return DAG.getBitcast(VT, Splat);
2133     }
2134   }
2135 
2136   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2137   // which constitute a large proportion of the elements. In such cases we can
2138   // splat a vector with the dominant element and make up the shortfall with
2139   // INSERT_VECTOR_ELTs.
2140   // Note that this includes vectors of 2 elements by association. The
2141   // upper-most element is the "dominant" one, allowing us to use a splat to
2142   // "insert" the upper element, and an insert of the lower element at position
2143   // 0, which improves codegen.
2144   SDValue DominantValue;
2145   unsigned MostCommonCount = 0;
2146   DenseMap<SDValue, unsigned> ValueCounts;
2147   unsigned NumUndefElts =
2148       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2149 
2150   // Track the number of scalar loads we know we'd be inserting, estimated as
2151   // any non-zero floating-point constant. Other kinds of element are either
2152   // already in registers or are materialized on demand. The threshold at which
2153   // a vector load is more desirable than several scalar materializion and
2154   // vector-insertion instructions is not known.
2155   unsigned NumScalarLoads = 0;
2156 
2157   for (SDValue V : Op->op_values()) {
2158     if (V.isUndef())
2159       continue;
2160 
2161     ValueCounts.insert(std::make_pair(V, 0));
2162     unsigned &Count = ValueCounts[V];
2163 
2164     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2165       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2166 
2167     // Is this value dominant? In case of a tie, prefer the highest element as
2168     // it's cheaper to insert near the beginning of a vector than it is at the
2169     // end.
2170     if (++Count >= MostCommonCount) {
2171       DominantValue = V;
2172       MostCommonCount = Count;
2173     }
2174   }
2175 
2176   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2177   unsigned NumDefElts = NumElts - NumUndefElts;
2178   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2179 
2180   // Don't perform this optimization when optimizing for size, since
2181   // materializing elements and inserting them tends to cause code bloat.
2182   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2183       ((MostCommonCount > DominantValueCountThreshold) ||
2184        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2185     // Start by splatting the most common element.
2186     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2187 
2188     DenseSet<SDValue> Processed{DominantValue};
2189     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2190     for (const auto &OpIdx : enumerate(Op->ops())) {
2191       const SDValue &V = OpIdx.value();
2192       if (V.isUndef() || !Processed.insert(V).second)
2193         continue;
2194       if (ValueCounts[V] == 1) {
2195         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2196                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2197       } else {
2198         // Blend in all instances of this value using a VSELECT, using a
2199         // mask where each bit signals whether that element is the one
2200         // we're after.
2201         SmallVector<SDValue> Ops;
2202         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2203           return DAG.getConstant(V == V1, DL, XLenVT);
2204         });
2205         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2206                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2207                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2208       }
2209     }
2210 
2211     return Vec;
2212   }
2213 
2214   return SDValue();
2215 }
2216 
2217 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2218                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2219   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2220     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2221     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2222     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2223     // node in order to try and match RVV vector/scalar instructions.
2224     if ((LoC >> 31) == HiC)
2225       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2226 
2227     // If vl is equal to VLMax and Hi constant is equal to Lo, we could use
2228     // vmv.v.x whose EEW = 32 to lower it.
2229     auto *Const = dyn_cast<ConstantSDNode>(VL);
2230     if (LoC == HiC && Const && Const->getSExtValue() == RISCV::VLMaxSentinel) {
2231       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2232       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2233       // access the subtarget here now.
2234       auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, VL);
2235       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2236     }
2237   }
2238 
2239   // Fall back to a stack store and stride x0 vector load.
2240   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2241 }
2242 
2243 // Called by type legalization to handle splat of i64 on RV32.
2244 // FIXME: We can optimize this when the type has sign or zero bits in one
2245 // of the halves.
2246 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2247                                    SDValue VL, SelectionDAG &DAG) {
2248   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2249   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2250                            DAG.getConstant(0, DL, MVT::i32));
2251   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2252                            DAG.getConstant(1, DL, MVT::i32));
2253   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2254 }
2255 
2256 // This function lowers a splat of a scalar operand Splat with the vector
2257 // length VL. It ensures the final sequence is type legal, which is useful when
2258 // lowering a splat after type legalization.
2259 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2260                                 SelectionDAG &DAG,
2261                                 const RISCVSubtarget &Subtarget) {
2262   if (VT.isFloatingPoint()) {
2263     // If VL is 1, we could use vfmv.s.f.
2264     if (isOneConstant(VL))
2265       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
2266                          Scalar, VL);
2267     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2268   }
2269 
2270   MVT XLenVT = Subtarget.getXLenVT();
2271 
2272   // Simplest case is that the operand needs to be promoted to XLenVT.
2273   if (Scalar.getValueType().bitsLE(XLenVT)) {
2274     // If the operand is a constant, sign extend to increase our chances
2275     // of being able to use a .vi instruction. ANY_EXTEND would become a
2276     // a zero extend and the simm5 check in isel would fail.
2277     // FIXME: Should we ignore the upper bits in isel instead?
2278     unsigned ExtOpc =
2279         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2280     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2281     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2282     // If VL is 1 and the scalar value won't benefit from immediate, we could
2283     // use vmv.s.x.
2284     if (isOneConstant(VL) &&
2285         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2286       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar,
2287                          VL);
2288     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2289   }
2290 
2291   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2292          "Unexpected scalar for splat lowering!");
2293 
2294   if (isOneConstant(VL) && isNullConstant(Scalar))
2295     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT),
2296                        DAG.getConstant(0, DL, XLenVT), VL);
2297 
2298   // Otherwise use the more complicated splatting algorithm.
2299   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2300 }
2301 
2302 // Is the mask a slidedown that shifts in undefs.
2303 static int matchShuffleAsSlideDown(ArrayRef<int> Mask) {
2304   int Size = Mask.size();
2305 
2306   // Elements shifted in should be undef.
2307   auto CheckUndefs = [&](int Shift) {
2308     for (int i = Size - Shift; i != Size; ++i)
2309       if (Mask[i] >= 0)
2310         return false;
2311     return true;
2312   };
2313 
2314   // Elements should be shifted or undef.
2315   auto MatchShift = [&](int Shift) {
2316     for (int i = 0; i != Size - Shift; ++i)
2317        if (Mask[i] >= 0 && Mask[i] != Shift + i)
2318          return false;
2319     return true;
2320   };
2321 
2322   // Try all possible shifts.
2323   for (int Shift = 1; Shift != Size; ++Shift)
2324     if (CheckUndefs(Shift) && MatchShift(Shift))
2325       return Shift;
2326 
2327   // No match.
2328   return -1;
2329 }
2330 
2331 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2332                                    const RISCVSubtarget &Subtarget) {
2333   SDValue V1 = Op.getOperand(0);
2334   SDValue V2 = Op.getOperand(1);
2335   SDLoc DL(Op);
2336   MVT XLenVT = Subtarget.getXLenVT();
2337   MVT VT = Op.getSimpleValueType();
2338   unsigned NumElts = VT.getVectorNumElements();
2339   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2340 
2341   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2342 
2343   SDValue TrueMask, VL;
2344   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2345 
2346   if (SVN->isSplat()) {
2347     const int Lane = SVN->getSplatIndex();
2348     if (Lane >= 0) {
2349       MVT SVT = VT.getVectorElementType();
2350 
2351       // Turn splatted vector load into a strided load with an X0 stride.
2352       SDValue V = V1;
2353       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2354       // with undef.
2355       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2356       int Offset = Lane;
2357       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2358         int OpElements =
2359             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2360         V = V.getOperand(Offset / OpElements);
2361         Offset %= OpElements;
2362       }
2363 
2364       // We need to ensure the load isn't atomic or volatile.
2365       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2366         auto *Ld = cast<LoadSDNode>(V);
2367         Offset *= SVT.getStoreSize();
2368         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2369                                                    TypeSize::Fixed(Offset), DL);
2370 
2371         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2372         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2373           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2374           SDValue IntID =
2375               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2376           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
2377                            DAG.getRegister(RISCV::X0, XLenVT), VL};
2378           SDValue NewLoad = DAG.getMemIntrinsicNode(
2379               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2380               DAG.getMachineFunction().getMachineMemOperand(
2381                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2382           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2383           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2384         }
2385 
2386         // Otherwise use a scalar load and splat. This will give the best
2387         // opportunity to fold a splat into the operation. ISel can turn it into
2388         // the x0 strided load if we aren't able to fold away the select.
2389         if (SVT.isFloatingPoint())
2390           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2391                           Ld->getPointerInfo().getWithOffset(Offset),
2392                           Ld->getOriginalAlign(),
2393                           Ld->getMemOperand()->getFlags());
2394         else
2395           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2396                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2397                              Ld->getOriginalAlign(),
2398                              Ld->getMemOperand()->getFlags());
2399         DAG.makeEquivalentMemoryOrdering(Ld, V);
2400 
2401         unsigned Opc =
2402             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2403         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2404         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2405       }
2406 
2407       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2408       assert(Lane < (int)NumElts && "Unexpected lane!");
2409       SDValue Gather =
2410           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2411                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2412       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2413     }
2414   }
2415 
2416   // Try to match as a slidedown.
2417   int SlideAmt = matchShuffleAsSlideDown(SVN->getMask());
2418   if (SlideAmt >= 0) {
2419     // TODO: Should we reduce the VL to account for the upper undef elements?
2420     // Requires additional vsetvlis, but might be faster to execute.
2421     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2422     SDValue SlideDown =
2423         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2424                     DAG.getUNDEF(ContainerVT), V1,
2425                     DAG.getConstant(SlideAmt, DL, XLenVT),
2426                     TrueMask, VL);
2427     return convertFromScalableVector(VT, SlideDown, DAG, Subtarget);
2428   }
2429 
2430   // Detect shuffles which can be re-expressed as vector selects; these are
2431   // shuffles in which each element in the destination is taken from an element
2432   // at the corresponding index in either source vectors.
2433   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
2434     int MaskIndex = MaskIdx.value();
2435     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2436   });
2437 
2438   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2439 
2440   SmallVector<SDValue> MaskVals;
2441   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2442   // merged with a second vrgather.
2443   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2444 
2445   // By default we preserve the original operand order, and use a mask to
2446   // select LHS as true and RHS as false. However, since RVV vector selects may
2447   // feature splats but only on the LHS, we may choose to invert our mask and
2448   // instead select between RHS and LHS.
2449   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2450   bool InvertMask = IsSelect == SwapOps;
2451 
2452   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2453   // half.
2454   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2455 
2456   // Now construct the mask that will be used by the vselect or blended
2457   // vrgather operation. For vrgathers, construct the appropriate indices into
2458   // each vector.
2459   for (int MaskIndex : SVN->getMask()) {
2460     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2461     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2462     if (!IsSelect) {
2463       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2464       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2465                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2466                                      : DAG.getUNDEF(XLenVT));
2467       GatherIndicesRHS.push_back(
2468           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2469                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2470       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2471         ++LHSIndexCounts[MaskIndex];
2472       if (!IsLHSOrUndefIndex)
2473         ++RHSIndexCounts[MaskIndex - NumElts];
2474     }
2475   }
2476 
2477   if (SwapOps) {
2478     std::swap(V1, V2);
2479     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2480   }
2481 
2482   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2483   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2484   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2485 
2486   if (IsSelect)
2487     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2488 
2489   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2490     // On such a large vector we're unable to use i8 as the index type.
2491     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2492     // may involve vector splitting if we're already at LMUL=8, or our
2493     // user-supplied maximum fixed-length LMUL.
2494     return SDValue();
2495   }
2496 
2497   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2498   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2499   MVT IndexVT = VT.changeTypeToInteger();
2500   // Since we can't introduce illegal index types at this stage, use i16 and
2501   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2502   // than XLenVT.
2503   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2504     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2505     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2506   }
2507 
2508   MVT IndexContainerVT =
2509       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2510 
2511   SDValue Gather;
2512   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2513   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2514   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2515     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2516   } else {
2517     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2518     // If only one index is used, we can use a "splat" vrgather.
2519     // TODO: We can splat the most-common index and fix-up any stragglers, if
2520     // that's beneficial.
2521     if (LHSIndexCounts.size() == 1) {
2522       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2523       Gather =
2524           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2525                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2526     } else {
2527       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2528       LHSIndices =
2529           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2530 
2531       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2532                            TrueMask, VL);
2533     }
2534   }
2535 
2536   // If a second vector operand is used by this shuffle, blend it in with an
2537   // additional vrgather.
2538   if (!V2.isUndef()) {
2539     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2540     // If only one index is used, we can use a "splat" vrgather.
2541     // TODO: We can splat the most-common index and fix-up any stragglers, if
2542     // that's beneficial.
2543     if (RHSIndexCounts.size() == 1) {
2544       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2545       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2546                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2547     } else {
2548       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2549       RHSIndices =
2550           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2551       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2552                        VL);
2553     }
2554 
2555     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2556     SelectMask =
2557         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2558 
2559     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2560                          Gather, VL);
2561   }
2562 
2563   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2564 }
2565 
2566 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2567                                      SDLoc DL, SelectionDAG &DAG,
2568                                      const RISCVSubtarget &Subtarget) {
2569   if (VT.isScalableVector())
2570     return DAG.getFPExtendOrRound(Op, DL, VT);
2571   assert(VT.isFixedLengthVector() &&
2572          "Unexpected value type for RVV FP extend/round lowering");
2573   SDValue Mask, VL;
2574   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2575   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2576                         ? RISCVISD::FP_EXTEND_VL
2577                         : RISCVISD::FP_ROUND_VL;
2578   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2579 }
2580 
2581 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2582 // the exponent.
2583 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2584   MVT VT = Op.getSimpleValueType();
2585   unsigned EltSize = VT.getScalarSizeInBits();
2586   SDValue Src = Op.getOperand(0);
2587   SDLoc DL(Op);
2588 
2589   // We need a FP type that can represent the value.
2590   // TODO: Use f16 for i8 when possible?
2591   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2592   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2593 
2594   // Legal types should have been checked in the RISCVTargetLowering
2595   // constructor.
2596   // TODO: Splitting may make sense in some cases.
2597   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2598          "Expected legal float type!");
2599 
2600   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2601   // The trailing zero count is equal to log2 of this single bit value.
2602   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2603     SDValue Neg =
2604         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2605     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2606   }
2607 
2608   // We have a legal FP type, convert to it.
2609   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2610   // Bitcast to integer and shift the exponent to the LSB.
2611   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2612   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2613   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2614   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2615                               DAG.getConstant(ShiftAmt, DL, IntVT));
2616   // Truncate back to original type to allow vnsrl.
2617   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2618   // The exponent contains log2 of the value in biased form.
2619   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2620 
2621   // For trailing zeros, we just need to subtract the bias.
2622   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2623     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2624                        DAG.getConstant(ExponentBias, DL, VT));
2625 
2626   // For leading zeros, we need to remove the bias and convert from log2 to
2627   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2628   unsigned Adjust = ExponentBias + (EltSize - 1);
2629   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2630 }
2631 
2632 // While RVV has alignment restrictions, we should always be able to load as a
2633 // legal equivalently-sized byte-typed vector instead. This method is
2634 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2635 // the load is already correctly-aligned, it returns SDValue().
2636 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2637                                                     SelectionDAG &DAG) const {
2638   auto *Load = cast<LoadSDNode>(Op);
2639   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2640 
2641   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2642                                      Load->getMemoryVT(),
2643                                      *Load->getMemOperand()))
2644     return SDValue();
2645 
2646   SDLoc DL(Op);
2647   MVT VT = Op.getSimpleValueType();
2648   unsigned EltSizeBits = VT.getScalarSizeInBits();
2649   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2650          "Unexpected unaligned RVV load type");
2651   MVT NewVT =
2652       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2653   assert(NewVT.isValid() &&
2654          "Expecting equally-sized RVV vector types to be legal");
2655   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2656                           Load->getPointerInfo(), Load->getOriginalAlign(),
2657                           Load->getMemOperand()->getFlags());
2658   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2659 }
2660 
2661 // While RVV has alignment restrictions, we should always be able to store as a
2662 // legal equivalently-sized byte-typed vector instead. This method is
2663 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2664 // returns SDValue() if the store is already correctly aligned.
2665 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2666                                                      SelectionDAG &DAG) const {
2667   auto *Store = cast<StoreSDNode>(Op);
2668   assert(Store && Store->getValue().getValueType().isVector() &&
2669          "Expected vector store");
2670 
2671   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2672                                      Store->getMemoryVT(),
2673                                      *Store->getMemOperand()))
2674     return SDValue();
2675 
2676   SDLoc DL(Op);
2677   SDValue StoredVal = Store->getValue();
2678   MVT VT = StoredVal.getSimpleValueType();
2679   unsigned EltSizeBits = VT.getScalarSizeInBits();
2680   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2681          "Unexpected unaligned RVV store type");
2682   MVT NewVT =
2683       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2684   assert(NewVT.isValid() &&
2685          "Expecting equally-sized RVV vector types to be legal");
2686   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2687   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2688                       Store->getPointerInfo(), Store->getOriginalAlign(),
2689                       Store->getMemOperand()->getFlags());
2690 }
2691 
2692 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2693                                             SelectionDAG &DAG) const {
2694   switch (Op.getOpcode()) {
2695   default:
2696     report_fatal_error("unimplemented operand");
2697   case ISD::GlobalAddress:
2698     return lowerGlobalAddress(Op, DAG);
2699   case ISD::BlockAddress:
2700     return lowerBlockAddress(Op, DAG);
2701   case ISD::ConstantPool:
2702     return lowerConstantPool(Op, DAG);
2703   case ISD::JumpTable:
2704     return lowerJumpTable(Op, DAG);
2705   case ISD::GlobalTLSAddress:
2706     return lowerGlobalTLSAddress(Op, DAG);
2707   case ISD::SELECT:
2708     return lowerSELECT(Op, DAG);
2709   case ISD::BRCOND:
2710     return lowerBRCOND(Op, DAG);
2711   case ISD::VASTART:
2712     return lowerVASTART(Op, DAG);
2713   case ISD::FRAMEADDR:
2714     return lowerFRAMEADDR(Op, DAG);
2715   case ISD::RETURNADDR:
2716     return lowerRETURNADDR(Op, DAG);
2717   case ISD::SHL_PARTS:
2718     return lowerShiftLeftParts(Op, DAG);
2719   case ISD::SRA_PARTS:
2720     return lowerShiftRightParts(Op, DAG, true);
2721   case ISD::SRL_PARTS:
2722     return lowerShiftRightParts(Op, DAG, false);
2723   case ISD::BITCAST: {
2724     SDLoc DL(Op);
2725     EVT VT = Op.getValueType();
2726     SDValue Op0 = Op.getOperand(0);
2727     EVT Op0VT = Op0.getValueType();
2728     MVT XLenVT = Subtarget.getXLenVT();
2729     if (VT.isFixedLengthVector()) {
2730       // We can handle fixed length vector bitcasts with a simple replacement
2731       // in isel.
2732       if (Op0VT.isFixedLengthVector())
2733         return Op;
2734       // When bitcasting from scalar to fixed-length vector, insert the scalar
2735       // into a one-element vector of the result type, and perform a vector
2736       // bitcast.
2737       if (!Op0VT.isVector()) {
2738         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2739         if (!isTypeLegal(BVT))
2740           return SDValue();
2741         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2742                                               DAG.getUNDEF(BVT), Op0,
2743                                               DAG.getConstant(0, DL, XLenVT)));
2744       }
2745       return SDValue();
2746     }
2747     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2748     // thus: bitcast the vector to a one-element vector type whose element type
2749     // is the same as the result type, and extract the first element.
2750     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2751       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2752       if (!isTypeLegal(BVT))
2753         return SDValue();
2754       SDValue BVec = DAG.getBitcast(BVT, Op0);
2755       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2756                          DAG.getConstant(0, DL, XLenVT));
2757     }
2758     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2759       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2760       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2761       return FPConv;
2762     }
2763     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2764         Subtarget.hasStdExtF()) {
2765       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2766       SDValue FPConv =
2767           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2768       return FPConv;
2769     }
2770     return SDValue();
2771   }
2772   case ISD::INTRINSIC_WO_CHAIN:
2773     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2774   case ISD::INTRINSIC_W_CHAIN:
2775     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2776   case ISD::INTRINSIC_VOID:
2777     return LowerINTRINSIC_VOID(Op, DAG);
2778   case ISD::BSWAP:
2779   case ISD::BITREVERSE: {
2780     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
2781     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2782     MVT VT = Op.getSimpleValueType();
2783     SDLoc DL(Op);
2784     // Start with the maximum immediate value which is the bitwidth - 1.
2785     unsigned Imm = VT.getSizeInBits() - 1;
2786     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2787     if (Op.getOpcode() == ISD::BSWAP)
2788       Imm &= ~0x7U;
2789     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2790                        DAG.getConstant(Imm, DL, VT));
2791   }
2792   case ISD::FSHL:
2793   case ISD::FSHR: {
2794     MVT VT = Op.getSimpleValueType();
2795     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2796     SDLoc DL(Op);
2797     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2798     // use log(XLen) bits. Mask the shift amount accordingly to prevent
2799     // accidentally setting the extra bit.
2800     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2801     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2802                                 DAG.getConstant(ShAmtWidth, DL, VT));
2803     // fshl and fshr concatenate their operands in the same order. fsr and fsl
2804     // instruction use different orders. fshl will return its first operand for
2805     // shift of zero, fshr will return its second operand. fsl and fsr both
2806     // return rs1 so the ISD nodes need to have different operand orders.
2807     // Shift amount is in rs2.
2808     SDValue Op0 = Op.getOperand(0);
2809     SDValue Op1 = Op.getOperand(1);
2810     unsigned Opc = RISCVISD::FSL;
2811     if (Op.getOpcode() == ISD::FSHR) {
2812       std::swap(Op0, Op1);
2813       Opc = RISCVISD::FSR;
2814     }
2815     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
2816   }
2817   case ISD::TRUNCATE: {
2818     SDLoc DL(Op);
2819     MVT VT = Op.getSimpleValueType();
2820     // Only custom-lower vector truncates
2821     if (!VT.isVector())
2822       return Op;
2823 
2824     // Truncates to mask types are handled differently
2825     if (VT.getVectorElementType() == MVT::i1)
2826       return lowerVectorMaskTrunc(Op, DAG);
2827 
2828     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2829     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2830     // truncate by one power of two at a time.
2831     MVT DstEltVT = VT.getVectorElementType();
2832 
2833     SDValue Src = Op.getOperand(0);
2834     MVT SrcVT = Src.getSimpleValueType();
2835     MVT SrcEltVT = SrcVT.getVectorElementType();
2836 
2837     assert(DstEltVT.bitsLT(SrcEltVT) &&
2838            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2839            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2840            "Unexpected vector truncate lowering");
2841 
2842     MVT ContainerVT = SrcVT;
2843     if (SrcVT.isFixedLengthVector()) {
2844       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2845       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2846     }
2847 
2848     SDValue Result = Src;
2849     SDValue Mask, VL;
2850     std::tie(Mask, VL) =
2851         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2852     LLVMContext &Context = *DAG.getContext();
2853     const ElementCount Count = ContainerVT.getVectorElementCount();
2854     do {
2855       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2856       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2857       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2858                            Mask, VL);
2859     } while (SrcEltVT != DstEltVT);
2860 
2861     if (SrcVT.isFixedLengthVector())
2862       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2863 
2864     return Result;
2865   }
2866   case ISD::ANY_EXTEND:
2867   case ISD::ZERO_EXTEND:
2868     if (Op.getOperand(0).getValueType().isVector() &&
2869         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2870       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2871     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2872   case ISD::SIGN_EXTEND:
2873     if (Op.getOperand(0).getValueType().isVector() &&
2874         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2875       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2876     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2877   case ISD::SPLAT_VECTOR_PARTS:
2878     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2879   case ISD::INSERT_VECTOR_ELT:
2880     return lowerINSERT_VECTOR_ELT(Op, DAG);
2881   case ISD::EXTRACT_VECTOR_ELT:
2882     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2883   case ISD::VSCALE: {
2884     MVT VT = Op.getSimpleValueType();
2885     SDLoc DL(Op);
2886     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2887     // We define our scalable vector types for lmul=1 to use a 64 bit known
2888     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2889     // vscale as VLENB / 8.
2890     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
2891     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2892       // We assume VLENB is a multiple of 8. We manually choose the best shift
2893       // here because SimplifyDemandedBits isn't always able to simplify it.
2894       uint64_t Val = Op.getConstantOperandVal(0);
2895       if (isPowerOf2_64(Val)) {
2896         uint64_t Log2 = Log2_64(Val);
2897         if (Log2 < 3)
2898           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2899                              DAG.getConstant(3 - Log2, DL, VT));
2900         if (Log2 > 3)
2901           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2902                              DAG.getConstant(Log2 - 3, DL, VT));
2903         return VLENB;
2904       }
2905       // If the multiplier is a multiple of 8, scale it down to avoid needing
2906       // to shift the VLENB value.
2907       if ((Val % 8) == 0)
2908         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2909                            DAG.getConstant(Val / 8, DL, VT));
2910     }
2911 
2912     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2913                                  DAG.getConstant(3, DL, VT));
2914     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2915   }
2916   case ISD::FPOWI: {
2917     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
2918     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
2919     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
2920         Op.getOperand(1).getValueType() == MVT::i32) {
2921       SDLoc DL(Op);
2922       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
2923       SDValue Powi =
2924           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
2925       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
2926                          DAG.getIntPtrConstant(0, DL));
2927     }
2928     return SDValue();
2929   }
2930   case ISD::FP_EXTEND: {
2931     // RVV can only do fp_extend to types double the size as the source. We
2932     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2933     // via f32.
2934     SDLoc DL(Op);
2935     MVT VT = Op.getSimpleValueType();
2936     SDValue Src = Op.getOperand(0);
2937     MVT SrcVT = Src.getSimpleValueType();
2938 
2939     // Prepare any fixed-length vector operands.
2940     MVT ContainerVT = VT;
2941     if (SrcVT.isFixedLengthVector()) {
2942       ContainerVT = getContainerForFixedLengthVector(VT);
2943       MVT SrcContainerVT =
2944           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2945       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2946     }
2947 
2948     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2949         SrcVT.getVectorElementType() != MVT::f16) {
2950       // For scalable vectors, we only need to close the gap between
2951       // vXf16->vXf64.
2952       if (!VT.isFixedLengthVector())
2953         return Op;
2954       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2955       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2956       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2957     }
2958 
2959     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2960     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2961     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2962         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2963 
2964     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2965                                            DL, DAG, Subtarget);
2966     if (VT.isFixedLengthVector())
2967       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2968     return Extend;
2969   }
2970   case ISD::FP_ROUND: {
2971     // RVV can only do fp_round to types half the size as the source. We
2972     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2973     // conversion instruction.
2974     SDLoc DL(Op);
2975     MVT VT = Op.getSimpleValueType();
2976     SDValue Src = Op.getOperand(0);
2977     MVT SrcVT = Src.getSimpleValueType();
2978 
2979     // Prepare any fixed-length vector operands.
2980     MVT ContainerVT = VT;
2981     if (VT.isFixedLengthVector()) {
2982       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2983       ContainerVT =
2984           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2985       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2986     }
2987 
2988     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2989         SrcVT.getVectorElementType() != MVT::f64) {
2990       // For scalable vectors, we only need to close the gap between
2991       // vXf64<->vXf16.
2992       if (!VT.isFixedLengthVector())
2993         return Op;
2994       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2995       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2996       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2997     }
2998 
2999     SDValue Mask, VL;
3000     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3001 
3002     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3003     SDValue IntermediateRound =
3004         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3005     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3006                                           DL, DAG, Subtarget);
3007 
3008     if (VT.isFixedLengthVector())
3009       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3010     return Round;
3011   }
3012   case ISD::FP_TO_SINT:
3013   case ISD::FP_TO_UINT:
3014   case ISD::SINT_TO_FP:
3015   case ISD::UINT_TO_FP: {
3016     // RVV can only do fp<->int conversions to types half/double the size as
3017     // the source. We custom-lower any conversions that do two hops into
3018     // sequences.
3019     MVT VT = Op.getSimpleValueType();
3020     if (!VT.isVector())
3021       return Op;
3022     SDLoc DL(Op);
3023     SDValue Src = Op.getOperand(0);
3024     MVT EltVT = VT.getVectorElementType();
3025     MVT SrcVT = Src.getSimpleValueType();
3026     MVT SrcEltVT = SrcVT.getVectorElementType();
3027     unsigned EltSize = EltVT.getSizeInBits();
3028     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3029     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3030            "Unexpected vector element types");
3031 
3032     bool IsInt2FP = SrcEltVT.isInteger();
3033     // Widening conversions
3034     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3035       if (IsInt2FP) {
3036         // Do a regular integer sign/zero extension then convert to float.
3037         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3038                                       VT.getVectorElementCount());
3039         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3040                                  ? ISD::ZERO_EXTEND
3041                                  : ISD::SIGN_EXTEND;
3042         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3043         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3044       }
3045       // FP2Int
3046       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3047       // Do one doubling fp_extend then complete the operation by converting
3048       // to int.
3049       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3050       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3051       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3052     }
3053 
3054     // Narrowing conversions
3055     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3056       if (IsInt2FP) {
3057         // One narrowing int_to_fp, then an fp_round.
3058         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3059         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3060         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3061         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3062       }
3063       // FP2Int
3064       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3065       // representable by the integer, the result is poison.
3066       MVT IVecVT =
3067           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3068                            VT.getVectorElementCount());
3069       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3070       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3071     }
3072 
3073     // Scalable vectors can exit here. Patterns will handle equally-sized
3074     // conversions halving/doubling ones.
3075     if (!VT.isFixedLengthVector())
3076       return Op;
3077 
3078     // For fixed-length vectors we lower to a custom "VL" node.
3079     unsigned RVVOpc = 0;
3080     switch (Op.getOpcode()) {
3081     default:
3082       llvm_unreachable("Impossible opcode");
3083     case ISD::FP_TO_SINT:
3084       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3085       break;
3086     case ISD::FP_TO_UINT:
3087       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3088       break;
3089     case ISD::SINT_TO_FP:
3090       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3091       break;
3092     case ISD::UINT_TO_FP:
3093       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3094       break;
3095     }
3096 
3097     MVT ContainerVT, SrcContainerVT;
3098     // Derive the reference container type from the larger vector type.
3099     if (SrcEltSize > EltSize) {
3100       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3101       ContainerVT =
3102           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3103     } else {
3104       ContainerVT = getContainerForFixedLengthVector(VT);
3105       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3106     }
3107 
3108     SDValue Mask, VL;
3109     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3110 
3111     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3112     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3113     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3114   }
3115   case ISD::FP_TO_SINT_SAT:
3116   case ISD::FP_TO_UINT_SAT:
3117     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3118   case ISD::FTRUNC:
3119   case ISD::FCEIL:
3120   case ISD::FFLOOR:
3121     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3122   case ISD::VECREDUCE_ADD:
3123   case ISD::VECREDUCE_UMAX:
3124   case ISD::VECREDUCE_SMAX:
3125   case ISD::VECREDUCE_UMIN:
3126   case ISD::VECREDUCE_SMIN:
3127     return lowerVECREDUCE(Op, DAG);
3128   case ISD::VECREDUCE_AND:
3129   case ISD::VECREDUCE_OR:
3130   case ISD::VECREDUCE_XOR:
3131     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3132       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3133     return lowerVECREDUCE(Op, DAG);
3134   case ISD::VECREDUCE_FADD:
3135   case ISD::VECREDUCE_SEQ_FADD:
3136   case ISD::VECREDUCE_FMIN:
3137   case ISD::VECREDUCE_FMAX:
3138     return lowerFPVECREDUCE(Op, DAG);
3139   case ISD::VP_REDUCE_ADD:
3140   case ISD::VP_REDUCE_UMAX:
3141   case ISD::VP_REDUCE_SMAX:
3142   case ISD::VP_REDUCE_UMIN:
3143   case ISD::VP_REDUCE_SMIN:
3144   case ISD::VP_REDUCE_FADD:
3145   case ISD::VP_REDUCE_SEQ_FADD:
3146   case ISD::VP_REDUCE_FMIN:
3147   case ISD::VP_REDUCE_FMAX:
3148     return lowerVPREDUCE(Op, DAG);
3149   case ISD::VP_REDUCE_AND:
3150   case ISD::VP_REDUCE_OR:
3151   case ISD::VP_REDUCE_XOR:
3152     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3153       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3154     return lowerVPREDUCE(Op, DAG);
3155   case ISD::INSERT_SUBVECTOR:
3156     return lowerINSERT_SUBVECTOR(Op, DAG);
3157   case ISD::EXTRACT_SUBVECTOR:
3158     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3159   case ISD::STEP_VECTOR:
3160     return lowerSTEP_VECTOR(Op, DAG);
3161   case ISD::VECTOR_REVERSE:
3162     return lowerVECTOR_REVERSE(Op, DAG);
3163   case ISD::BUILD_VECTOR:
3164     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3165   case ISD::SPLAT_VECTOR:
3166     if (Op.getValueType().getVectorElementType() == MVT::i1)
3167       return lowerVectorMaskSplat(Op, DAG);
3168     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3169   case ISD::VECTOR_SHUFFLE:
3170     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3171   case ISD::CONCAT_VECTORS: {
3172     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3173     // better than going through the stack, as the default expansion does.
3174     SDLoc DL(Op);
3175     MVT VT = Op.getSimpleValueType();
3176     unsigned NumOpElts =
3177         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3178     SDValue Vec = DAG.getUNDEF(VT);
3179     for (const auto &OpIdx : enumerate(Op->ops())) {
3180       SDValue SubVec = OpIdx.value();
3181       // Don't insert undef subvectors.
3182       if (SubVec.isUndef())
3183         continue;
3184       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3185                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3186     }
3187     return Vec;
3188   }
3189   case ISD::LOAD:
3190     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3191       return V;
3192     if (Op.getValueType().isFixedLengthVector())
3193       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3194     return Op;
3195   case ISD::STORE:
3196     if (auto V = expandUnalignedRVVStore(Op, DAG))
3197       return V;
3198     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3199       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3200     return Op;
3201   case ISD::MLOAD:
3202   case ISD::VP_LOAD:
3203     return lowerMaskedLoad(Op, DAG);
3204   case ISD::MSTORE:
3205   case ISD::VP_STORE:
3206     return lowerMaskedStore(Op, DAG);
3207   case ISD::SETCC:
3208     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3209   case ISD::ADD:
3210     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3211   case ISD::SUB:
3212     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3213   case ISD::MUL:
3214     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3215   case ISD::MULHS:
3216     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3217   case ISD::MULHU:
3218     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3219   case ISD::AND:
3220     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3221                                               RISCVISD::AND_VL);
3222   case ISD::OR:
3223     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3224                                               RISCVISD::OR_VL);
3225   case ISD::XOR:
3226     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3227                                               RISCVISD::XOR_VL);
3228   case ISD::SDIV:
3229     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3230   case ISD::SREM:
3231     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3232   case ISD::UDIV:
3233     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3234   case ISD::UREM:
3235     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3236   case ISD::SHL:
3237   case ISD::SRA:
3238   case ISD::SRL:
3239     if (Op.getSimpleValueType().isFixedLengthVector())
3240       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3241     // This can be called for an i32 shift amount that needs to be promoted.
3242     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3243            "Unexpected custom legalisation");
3244     return SDValue();
3245   case ISD::SADDSAT:
3246     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3247   case ISD::UADDSAT:
3248     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3249   case ISD::SSUBSAT:
3250     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3251   case ISD::USUBSAT:
3252     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3253   case ISD::FADD:
3254     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3255   case ISD::FSUB:
3256     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3257   case ISD::FMUL:
3258     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3259   case ISD::FDIV:
3260     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3261   case ISD::FNEG:
3262     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3263   case ISD::FABS:
3264     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3265   case ISD::FSQRT:
3266     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3267   case ISD::FMA:
3268     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3269   case ISD::SMIN:
3270     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3271   case ISD::SMAX:
3272     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3273   case ISD::UMIN:
3274     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3275   case ISD::UMAX:
3276     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3277   case ISD::FMINNUM:
3278     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3279   case ISD::FMAXNUM:
3280     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3281   case ISD::ABS:
3282     return lowerABS(Op, DAG);
3283   case ISD::CTLZ_ZERO_UNDEF:
3284   case ISD::CTTZ_ZERO_UNDEF:
3285     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3286   case ISD::VSELECT:
3287     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3288   case ISD::FCOPYSIGN:
3289     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3290   case ISD::MGATHER:
3291   case ISD::VP_GATHER:
3292     return lowerMaskedGather(Op, DAG);
3293   case ISD::MSCATTER:
3294   case ISD::VP_SCATTER:
3295     return lowerMaskedScatter(Op, DAG);
3296   case ISD::FLT_ROUNDS_:
3297     return lowerGET_ROUNDING(Op, DAG);
3298   case ISD::SET_ROUNDING:
3299     return lowerSET_ROUNDING(Op, DAG);
3300   case ISD::VP_SELECT:
3301     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3302   case ISD::VP_ADD:
3303     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3304   case ISD::VP_SUB:
3305     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3306   case ISD::VP_MUL:
3307     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3308   case ISD::VP_SDIV:
3309     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3310   case ISD::VP_UDIV:
3311     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3312   case ISD::VP_SREM:
3313     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3314   case ISD::VP_UREM:
3315     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3316   case ISD::VP_AND:
3317     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3318   case ISD::VP_OR:
3319     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3320   case ISD::VP_XOR:
3321     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3322   case ISD::VP_ASHR:
3323     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3324   case ISD::VP_LSHR:
3325     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3326   case ISD::VP_SHL:
3327     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3328   case ISD::VP_FADD:
3329     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3330   case ISD::VP_FSUB:
3331     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3332   case ISD::VP_FMUL:
3333     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3334   case ISD::VP_FDIV:
3335     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3336   }
3337 }
3338 
3339 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3340                              SelectionDAG &DAG, unsigned Flags) {
3341   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3342 }
3343 
3344 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3345                              SelectionDAG &DAG, unsigned Flags) {
3346   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3347                                    Flags);
3348 }
3349 
3350 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3351                              SelectionDAG &DAG, unsigned Flags) {
3352   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3353                                    N->getOffset(), Flags);
3354 }
3355 
3356 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3357                              SelectionDAG &DAG, unsigned Flags) {
3358   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3359 }
3360 
3361 template <class NodeTy>
3362 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3363                                      bool IsLocal) const {
3364   SDLoc DL(N);
3365   EVT Ty = getPointerTy(DAG.getDataLayout());
3366 
3367   if (isPositionIndependent()) {
3368     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3369     if (IsLocal)
3370       // Use PC-relative addressing to access the symbol. This generates the
3371       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3372       // %pcrel_lo(auipc)).
3373       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3374 
3375     // Use PC-relative addressing to access the GOT for this symbol, then load
3376     // the address from the GOT. This generates the pattern (PseudoLA sym),
3377     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3378     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3379   }
3380 
3381   switch (getTargetMachine().getCodeModel()) {
3382   default:
3383     report_fatal_error("Unsupported code model for lowering");
3384   case CodeModel::Small: {
3385     // Generate a sequence for accessing addresses within the first 2 GiB of
3386     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3387     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3388     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3389     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3390     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3391   }
3392   case CodeModel::Medium: {
3393     // Generate a sequence for accessing addresses within any 2GiB range within
3394     // the address space. This generates the pattern (PseudoLLA sym), which
3395     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3396     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3397     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3398   }
3399   }
3400 }
3401 
3402 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3403                                                 SelectionDAG &DAG) const {
3404   SDLoc DL(Op);
3405   EVT Ty = Op.getValueType();
3406   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3407   int64_t Offset = N->getOffset();
3408   MVT XLenVT = Subtarget.getXLenVT();
3409 
3410   const GlobalValue *GV = N->getGlobal();
3411   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3412   SDValue Addr = getAddr(N, DAG, IsLocal);
3413 
3414   // In order to maximise the opportunity for common subexpression elimination,
3415   // emit a separate ADD node for the global address offset instead of folding
3416   // it in the global address node. Later peephole optimisations may choose to
3417   // fold it back in when profitable.
3418   if (Offset != 0)
3419     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3420                        DAG.getConstant(Offset, DL, XLenVT));
3421   return Addr;
3422 }
3423 
3424 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3425                                                SelectionDAG &DAG) const {
3426   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3427 
3428   return getAddr(N, DAG);
3429 }
3430 
3431 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3432                                                SelectionDAG &DAG) const {
3433   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3434 
3435   return getAddr(N, DAG);
3436 }
3437 
3438 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3439                                             SelectionDAG &DAG) const {
3440   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3441 
3442   return getAddr(N, DAG);
3443 }
3444 
3445 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3446                                               SelectionDAG &DAG,
3447                                               bool UseGOT) const {
3448   SDLoc DL(N);
3449   EVT Ty = getPointerTy(DAG.getDataLayout());
3450   const GlobalValue *GV = N->getGlobal();
3451   MVT XLenVT = Subtarget.getXLenVT();
3452 
3453   if (UseGOT) {
3454     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3455     // load the address from the GOT and add the thread pointer. This generates
3456     // the pattern (PseudoLA_TLS_IE sym), which expands to
3457     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3458     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3459     SDValue Load =
3460         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3461 
3462     // Add the thread pointer.
3463     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3464     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3465   }
3466 
3467   // Generate a sequence for accessing the address relative to the thread
3468   // pointer, with the appropriate adjustment for the thread pointer offset.
3469   // This generates the pattern
3470   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3471   SDValue AddrHi =
3472       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3473   SDValue AddrAdd =
3474       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3475   SDValue AddrLo =
3476       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3477 
3478   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3479   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3480   SDValue MNAdd = SDValue(
3481       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3482       0);
3483   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3484 }
3485 
3486 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3487                                                SelectionDAG &DAG) const {
3488   SDLoc DL(N);
3489   EVT Ty = getPointerTy(DAG.getDataLayout());
3490   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3491   const GlobalValue *GV = N->getGlobal();
3492 
3493   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3494   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3495   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3496   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3497   SDValue Load =
3498       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3499 
3500   // Prepare argument list to generate call.
3501   ArgListTy Args;
3502   ArgListEntry Entry;
3503   Entry.Node = Load;
3504   Entry.Ty = CallTy;
3505   Args.push_back(Entry);
3506 
3507   // Setup call to __tls_get_addr.
3508   TargetLowering::CallLoweringInfo CLI(DAG);
3509   CLI.setDebugLoc(DL)
3510       .setChain(DAG.getEntryNode())
3511       .setLibCallee(CallingConv::C, CallTy,
3512                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3513                     std::move(Args));
3514 
3515   return LowerCallTo(CLI).first;
3516 }
3517 
3518 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3519                                                    SelectionDAG &DAG) const {
3520   SDLoc DL(Op);
3521   EVT Ty = Op.getValueType();
3522   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3523   int64_t Offset = N->getOffset();
3524   MVT XLenVT = Subtarget.getXLenVT();
3525 
3526   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3527 
3528   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3529       CallingConv::GHC)
3530     report_fatal_error("In GHC calling convention TLS is not supported");
3531 
3532   SDValue Addr;
3533   switch (Model) {
3534   case TLSModel::LocalExec:
3535     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3536     break;
3537   case TLSModel::InitialExec:
3538     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3539     break;
3540   case TLSModel::LocalDynamic:
3541   case TLSModel::GeneralDynamic:
3542     Addr = getDynamicTLSAddr(N, DAG);
3543     break;
3544   }
3545 
3546   // In order to maximise the opportunity for common subexpression elimination,
3547   // emit a separate ADD node for the global address offset instead of folding
3548   // it in the global address node. Later peephole optimisations may choose to
3549   // fold it back in when profitable.
3550   if (Offset != 0)
3551     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3552                        DAG.getConstant(Offset, DL, XLenVT));
3553   return Addr;
3554 }
3555 
3556 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3557   SDValue CondV = Op.getOperand(0);
3558   SDValue TrueV = Op.getOperand(1);
3559   SDValue FalseV = Op.getOperand(2);
3560   SDLoc DL(Op);
3561   MVT VT = Op.getSimpleValueType();
3562   MVT XLenVT = Subtarget.getXLenVT();
3563 
3564   // Lower vector SELECTs to VSELECTs by splatting the condition.
3565   if (VT.isVector()) {
3566     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3567     SDValue CondSplat = VT.isScalableVector()
3568                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3569                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3570     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3571   }
3572 
3573   // If the result type is XLenVT and CondV is the output of a SETCC node
3574   // which also operated on XLenVT inputs, then merge the SETCC node into the
3575   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3576   // compare+branch instructions. i.e.:
3577   // (select (setcc lhs, rhs, cc), truev, falsev)
3578   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3579   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3580       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3581     SDValue LHS = CondV.getOperand(0);
3582     SDValue RHS = CondV.getOperand(1);
3583     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3584     ISD::CondCode CCVal = CC->get();
3585 
3586     // Special case for a select of 2 constants that have a diffence of 1.
3587     // Normally this is done by DAGCombine, but if the select is introduced by
3588     // type legalization or op legalization, we miss it. Restricting to SETLT
3589     // case for now because that is what signed saturating add/sub need.
3590     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3591     // but we would probably want to swap the true/false values if the condition
3592     // is SETGE/SETLE to avoid an XORI.
3593     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3594         CCVal == ISD::SETLT) {
3595       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3596       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3597       if (TrueVal - 1 == FalseVal)
3598         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3599       if (TrueVal + 1 == FalseVal)
3600         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3601     }
3602 
3603     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3604 
3605     SDValue TargetCC = DAG.getCondCode(CCVal);
3606     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3607     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3608   }
3609 
3610   // Otherwise:
3611   // (select condv, truev, falsev)
3612   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3613   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3614   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3615 
3616   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3617 
3618   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3619 }
3620 
3621 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3622   SDValue CondV = Op.getOperand(1);
3623   SDLoc DL(Op);
3624   MVT XLenVT = Subtarget.getXLenVT();
3625 
3626   if (CondV.getOpcode() == ISD::SETCC &&
3627       CondV.getOperand(0).getValueType() == XLenVT) {
3628     SDValue LHS = CondV.getOperand(0);
3629     SDValue RHS = CondV.getOperand(1);
3630     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3631 
3632     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3633 
3634     SDValue TargetCC = DAG.getCondCode(CCVal);
3635     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3636                        LHS, RHS, TargetCC, Op.getOperand(2));
3637   }
3638 
3639   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3640                      CondV, DAG.getConstant(0, DL, XLenVT),
3641                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3642 }
3643 
3644 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3645   MachineFunction &MF = DAG.getMachineFunction();
3646   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3647 
3648   SDLoc DL(Op);
3649   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3650                                  getPointerTy(MF.getDataLayout()));
3651 
3652   // vastart just stores the address of the VarArgsFrameIndex slot into the
3653   // memory location argument.
3654   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3655   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3656                       MachinePointerInfo(SV));
3657 }
3658 
3659 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3660                                             SelectionDAG &DAG) const {
3661   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3662   MachineFunction &MF = DAG.getMachineFunction();
3663   MachineFrameInfo &MFI = MF.getFrameInfo();
3664   MFI.setFrameAddressIsTaken(true);
3665   Register FrameReg = RI.getFrameRegister(MF);
3666   int XLenInBytes = Subtarget.getXLen() / 8;
3667 
3668   EVT VT = Op.getValueType();
3669   SDLoc DL(Op);
3670   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3671   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3672   while (Depth--) {
3673     int Offset = -(XLenInBytes * 2);
3674     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3675                               DAG.getIntPtrConstant(Offset, DL));
3676     FrameAddr =
3677         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3678   }
3679   return FrameAddr;
3680 }
3681 
3682 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3683                                              SelectionDAG &DAG) const {
3684   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3685   MachineFunction &MF = DAG.getMachineFunction();
3686   MachineFrameInfo &MFI = MF.getFrameInfo();
3687   MFI.setReturnAddressIsTaken(true);
3688   MVT XLenVT = Subtarget.getXLenVT();
3689   int XLenInBytes = Subtarget.getXLen() / 8;
3690 
3691   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3692     return SDValue();
3693 
3694   EVT VT = Op.getValueType();
3695   SDLoc DL(Op);
3696   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3697   if (Depth) {
3698     int Off = -XLenInBytes;
3699     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3700     SDValue Offset = DAG.getConstant(Off, DL, VT);
3701     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3702                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3703                        MachinePointerInfo());
3704   }
3705 
3706   // Return the value of the return address register, marking it an implicit
3707   // live-in.
3708   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3709   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3710 }
3711 
3712 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3713                                                  SelectionDAG &DAG) const {
3714   SDLoc DL(Op);
3715   SDValue Lo = Op.getOperand(0);
3716   SDValue Hi = Op.getOperand(1);
3717   SDValue Shamt = Op.getOperand(2);
3718   EVT VT = Lo.getValueType();
3719 
3720   // if Shamt-XLEN < 0: // Shamt < XLEN
3721   //   Lo = Lo << Shamt
3722   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3723   // else:
3724   //   Lo = 0
3725   //   Hi = Lo << (Shamt-XLEN)
3726 
3727   SDValue Zero = DAG.getConstant(0, DL, VT);
3728   SDValue One = DAG.getConstant(1, DL, VT);
3729   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3730   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3731   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3732   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3733 
3734   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3735   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3736   SDValue ShiftRightLo =
3737       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3738   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3739   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3740   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3741 
3742   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3743 
3744   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3745   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3746 
3747   SDValue Parts[2] = {Lo, Hi};
3748   return DAG.getMergeValues(Parts, DL);
3749 }
3750 
3751 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3752                                                   bool IsSRA) const {
3753   SDLoc DL(Op);
3754   SDValue Lo = Op.getOperand(0);
3755   SDValue Hi = Op.getOperand(1);
3756   SDValue Shamt = Op.getOperand(2);
3757   EVT VT = Lo.getValueType();
3758 
3759   // SRA expansion:
3760   //   if Shamt-XLEN < 0: // Shamt < XLEN
3761   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3762   //     Hi = Hi >>s Shamt
3763   //   else:
3764   //     Lo = Hi >>s (Shamt-XLEN);
3765   //     Hi = Hi >>s (XLEN-1)
3766   //
3767   // SRL expansion:
3768   //   if Shamt-XLEN < 0: // Shamt < XLEN
3769   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3770   //     Hi = Hi >>u Shamt
3771   //   else:
3772   //     Lo = Hi >>u (Shamt-XLEN);
3773   //     Hi = 0;
3774 
3775   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3776 
3777   SDValue Zero = DAG.getConstant(0, DL, VT);
3778   SDValue One = DAG.getConstant(1, DL, VT);
3779   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3780   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3781   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3782   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3783 
3784   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3785   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3786   SDValue ShiftLeftHi =
3787       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3788   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3789   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3790   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3791   SDValue HiFalse =
3792       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3793 
3794   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3795 
3796   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3797   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3798 
3799   SDValue Parts[2] = {Lo, Hi};
3800   return DAG.getMergeValues(Parts, DL);
3801 }
3802 
3803 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3804 // legal equivalently-sized i8 type, so we can use that as a go-between.
3805 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3806                                                   SelectionDAG &DAG) const {
3807   SDLoc DL(Op);
3808   MVT VT = Op.getSimpleValueType();
3809   SDValue SplatVal = Op.getOperand(0);
3810   // All-zeros or all-ones splats are handled specially.
3811   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3812     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3813     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3814   }
3815   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3816     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3817     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3818   }
3819   MVT XLenVT = Subtarget.getXLenVT();
3820   assert(SplatVal.getValueType() == XLenVT &&
3821          "Unexpected type for i1 splat value");
3822   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3823   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3824                          DAG.getConstant(1, DL, XLenVT));
3825   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3826   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3827   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3828 }
3829 
3830 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3831 // illegal (currently only vXi64 RV32).
3832 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3833 // them to SPLAT_VECTOR_I64
3834 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3835                                                      SelectionDAG &DAG) const {
3836   SDLoc DL(Op);
3837   MVT VecVT = Op.getSimpleValueType();
3838   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3839          "Unexpected SPLAT_VECTOR_PARTS lowering");
3840 
3841   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3842   SDValue Lo = Op.getOperand(0);
3843   SDValue Hi = Op.getOperand(1);
3844 
3845   if (VecVT.isFixedLengthVector()) {
3846     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3847     SDLoc DL(Op);
3848     SDValue Mask, VL;
3849     std::tie(Mask, VL) =
3850         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3851 
3852     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3853     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3854   }
3855 
3856   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3857     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3858     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3859     // If Hi constant is all the same sign bit as Lo, lower this as a custom
3860     // node in order to try and match RVV vector/scalar instructions.
3861     if ((LoC >> 31) == HiC)
3862       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3863   }
3864 
3865   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3866   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3867       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3868       Hi.getConstantOperandVal(1) == 31)
3869     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3870 
3871   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3872   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3873                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3874 }
3875 
3876 // Custom-lower extensions from mask vectors by using a vselect either with 1
3877 // for zero/any-extension or -1 for sign-extension:
3878 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3879 // Note that any-extension is lowered identically to zero-extension.
3880 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3881                                                 int64_t ExtTrueVal) const {
3882   SDLoc DL(Op);
3883   MVT VecVT = Op.getSimpleValueType();
3884   SDValue Src = Op.getOperand(0);
3885   // Only custom-lower extensions from mask types
3886   assert(Src.getValueType().isVector() &&
3887          Src.getValueType().getVectorElementType() == MVT::i1);
3888 
3889   MVT XLenVT = Subtarget.getXLenVT();
3890   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3891   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3892 
3893   if (VecVT.isScalableVector()) {
3894     // Be careful not to introduce illegal scalar types at this stage, and be
3895     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3896     // illegal and must be expanded. Since we know that the constants are
3897     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3898     bool IsRV32E64 =
3899         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3900 
3901     if (!IsRV32E64) {
3902       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3903       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3904     } else {
3905       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3906       SplatTrueVal =
3907           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3908     }
3909 
3910     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3911   }
3912 
3913   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3914   MVT I1ContainerVT =
3915       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3916 
3917   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3918 
3919   SDValue Mask, VL;
3920   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3921 
3922   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3923   SplatTrueVal =
3924       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3925   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3926                                SplatTrueVal, SplatZero, VL);
3927 
3928   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3929 }
3930 
3931 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3932     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3933   MVT ExtVT = Op.getSimpleValueType();
3934   // Only custom-lower extensions from fixed-length vector types.
3935   if (!ExtVT.isFixedLengthVector())
3936     return Op;
3937   MVT VT = Op.getOperand(0).getSimpleValueType();
3938   // Grab the canonical container type for the extended type. Infer the smaller
3939   // type from that to ensure the same number of vector elements, as we know
3940   // the LMUL will be sufficient to hold the smaller type.
3941   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3942   // Get the extended container type manually to ensure the same number of
3943   // vector elements between source and dest.
3944   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3945                                      ContainerExtVT.getVectorElementCount());
3946 
3947   SDValue Op1 =
3948       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3949 
3950   SDLoc DL(Op);
3951   SDValue Mask, VL;
3952   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3953 
3954   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3955 
3956   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3957 }
3958 
3959 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3960 // setcc operation:
3961 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3962 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3963                                                   SelectionDAG &DAG) const {
3964   SDLoc DL(Op);
3965   EVT MaskVT = Op.getValueType();
3966   // Only expect to custom-lower truncations to mask types
3967   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3968          "Unexpected type for vector mask lowering");
3969   SDValue Src = Op.getOperand(0);
3970   MVT VecVT = Src.getSimpleValueType();
3971 
3972   // If this is a fixed vector, we need to convert it to a scalable vector.
3973   MVT ContainerVT = VecVT;
3974   if (VecVT.isFixedLengthVector()) {
3975     ContainerVT = getContainerForFixedLengthVector(VecVT);
3976     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3977   }
3978 
3979   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3980   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3981 
3982   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3983   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3984 
3985   if (VecVT.isScalableVector()) {
3986     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3987     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3988   }
3989 
3990   SDValue Mask, VL;
3991   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3992 
3993   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3994   SDValue Trunc =
3995       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3996   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3997                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3998   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3999 }
4000 
4001 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4002 // first position of a vector, and that vector is slid up to the insert index.
4003 // By limiting the active vector length to index+1 and merging with the
4004 // original vector (with an undisturbed tail policy for elements >= VL), we
4005 // achieve the desired result of leaving all elements untouched except the one
4006 // at VL-1, which is replaced with the desired value.
4007 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4008                                                     SelectionDAG &DAG) const {
4009   SDLoc DL(Op);
4010   MVT VecVT = Op.getSimpleValueType();
4011   SDValue Vec = Op.getOperand(0);
4012   SDValue Val = Op.getOperand(1);
4013   SDValue Idx = Op.getOperand(2);
4014 
4015   if (VecVT.getVectorElementType() == MVT::i1) {
4016     // FIXME: For now we just promote to an i8 vector and insert into that,
4017     // but this is probably not optimal.
4018     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4019     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4020     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4021     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4022   }
4023 
4024   MVT ContainerVT = VecVT;
4025   // If the operand is a fixed-length vector, convert to a scalable one.
4026   if (VecVT.isFixedLengthVector()) {
4027     ContainerVT = getContainerForFixedLengthVector(VecVT);
4028     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4029   }
4030 
4031   MVT XLenVT = Subtarget.getXLenVT();
4032 
4033   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4034   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4035   // Even i64-element vectors on RV32 can be lowered without scalar
4036   // legalization if the most-significant 32 bits of the value are not affected
4037   // by the sign-extension of the lower 32 bits.
4038   // TODO: We could also catch sign extensions of a 32-bit value.
4039   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4040     const auto *CVal = cast<ConstantSDNode>(Val);
4041     if (isInt<32>(CVal->getSExtValue())) {
4042       IsLegalInsert = true;
4043       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4044     }
4045   }
4046 
4047   SDValue Mask, VL;
4048   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4049 
4050   SDValue ValInVec;
4051 
4052   if (IsLegalInsert) {
4053     unsigned Opc =
4054         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4055     if (isNullConstant(Idx)) {
4056       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4057       if (!VecVT.isFixedLengthVector())
4058         return Vec;
4059       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4060     }
4061     ValInVec =
4062         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4063   } else {
4064     // On RV32, i64-element vectors must be specially handled to place the
4065     // value at element 0, by using two vslide1up instructions in sequence on
4066     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4067     // this.
4068     SDValue One = DAG.getConstant(1, DL, XLenVT);
4069     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4070     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4071     MVT I32ContainerVT =
4072         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4073     SDValue I32Mask =
4074         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4075     // Limit the active VL to two.
4076     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4077     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4078     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4079     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
4080                            InsertI64VL);
4081     // First slide in the hi value, then the lo in underneath it.
4082     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4083                            ValHi, I32Mask, InsertI64VL);
4084     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4085                            ValLo, I32Mask, InsertI64VL);
4086     // Bitcast back to the right container type.
4087     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4088   }
4089 
4090   // Now that the value is in a vector, slide it into position.
4091   SDValue InsertVL =
4092       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4093   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4094                                 ValInVec, Idx, Mask, InsertVL);
4095   if (!VecVT.isFixedLengthVector())
4096     return Slideup;
4097   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4098 }
4099 
4100 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4101 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4102 // types this is done using VMV_X_S to allow us to glean information about the
4103 // sign bits of the result.
4104 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4105                                                      SelectionDAG &DAG) const {
4106   SDLoc DL(Op);
4107   SDValue Idx = Op.getOperand(1);
4108   SDValue Vec = Op.getOperand(0);
4109   EVT EltVT = Op.getValueType();
4110   MVT VecVT = Vec.getSimpleValueType();
4111   MVT XLenVT = Subtarget.getXLenVT();
4112 
4113   if (VecVT.getVectorElementType() == MVT::i1) {
4114     // FIXME: For now we just promote to an i8 vector and extract from that,
4115     // but this is probably not optimal.
4116     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4117     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4118     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4119   }
4120 
4121   // If this is a fixed vector, we need to convert it to a scalable vector.
4122   MVT ContainerVT = VecVT;
4123   if (VecVT.isFixedLengthVector()) {
4124     ContainerVT = getContainerForFixedLengthVector(VecVT);
4125     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4126   }
4127 
4128   // If the index is 0, the vector is already in the right position.
4129   if (!isNullConstant(Idx)) {
4130     // Use a VL of 1 to avoid processing more elements than we need.
4131     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4132     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4133     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4134     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4135                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4136   }
4137 
4138   if (!EltVT.isInteger()) {
4139     // Floating-point extracts are handled in TableGen.
4140     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4141                        DAG.getConstant(0, DL, XLenVT));
4142   }
4143 
4144   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4145   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4146 }
4147 
4148 // Some RVV intrinsics may claim that they want an integer operand to be
4149 // promoted or expanded.
4150 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4151                                           const RISCVSubtarget &Subtarget) {
4152   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4153           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4154          "Unexpected opcode");
4155 
4156   if (!Subtarget.hasVInstructions())
4157     return SDValue();
4158 
4159   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4160   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4161   SDLoc DL(Op);
4162 
4163   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4164       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4165   if (!II || !II->hasSplatOperand())
4166     return SDValue();
4167 
4168   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4169   assert(SplatOp < Op.getNumOperands());
4170 
4171   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4172   SDValue &ScalarOp = Operands[SplatOp];
4173   MVT OpVT = ScalarOp.getSimpleValueType();
4174   MVT XLenVT = Subtarget.getXLenVT();
4175 
4176   // If this isn't a scalar, or its type is XLenVT we're done.
4177   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4178     return SDValue();
4179 
4180   // Simplest case is that the operand needs to be promoted to XLenVT.
4181   if (OpVT.bitsLT(XLenVT)) {
4182     // If the operand is a constant, sign extend to increase our chances
4183     // of being able to use a .vi instruction. ANY_EXTEND would become a
4184     // a zero extend and the simm5 check in isel would fail.
4185     // FIXME: Should we ignore the upper bits in isel instead?
4186     unsigned ExtOpc =
4187         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4188     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4189     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4190   }
4191 
4192   // Use the previous operand to get the vXi64 VT. The result might be a mask
4193   // VT for compares. Using the previous operand assumes that the previous
4194   // operand will never have a smaller element size than a scalar operand and
4195   // that a widening operation never uses SEW=64.
4196   // NOTE: If this fails the below assert, we can probably just find the
4197   // element count from any operand or result and use it to construct the VT.
4198   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4199   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4200 
4201   // The more complex case is when the scalar is larger than XLenVT.
4202   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4203          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4204 
4205   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4206   // on the instruction to sign-extend since SEW>XLEN.
4207   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4208     if (isInt<32>(CVal->getSExtValue())) {
4209       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4210       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4211     }
4212   }
4213 
4214   // We need to convert the scalar to a splat vector.
4215   // FIXME: Can we implicitly truncate the scalar if it is known to
4216   // be sign extended?
4217   SDValue VL = Op.getOperand(II->VLOperand + 1 + HasChain);
4218   assert(VL.getValueType() == XLenVT);
4219   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
4220   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4221 }
4222 
4223 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4224                                                      SelectionDAG &DAG) const {
4225   unsigned IntNo = Op.getConstantOperandVal(0);
4226   SDLoc DL(Op);
4227   MVT XLenVT = Subtarget.getXLenVT();
4228 
4229   switch (IntNo) {
4230   default:
4231     break; // Don't custom lower most intrinsics.
4232   case Intrinsic::thread_pointer: {
4233     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4234     return DAG.getRegister(RISCV::X4, PtrVT);
4235   }
4236   case Intrinsic::riscv_orc_b:
4237     // Lower to the GORCI encoding for orc.b.
4238     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
4239                        DAG.getConstant(7, DL, XLenVT));
4240   case Intrinsic::riscv_grev:
4241   case Intrinsic::riscv_gorc: {
4242     unsigned Opc =
4243         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4244     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4245   }
4246   case Intrinsic::riscv_shfl:
4247   case Intrinsic::riscv_unshfl: {
4248     unsigned Opc =
4249         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4250     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4251   }
4252   case Intrinsic::riscv_bcompress:
4253   case Intrinsic::riscv_bdecompress: {
4254     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4255                                                        : RISCVISD::BDECOMPRESS;
4256     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4257   }
4258   case Intrinsic::riscv_bfp:
4259     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4260                        Op.getOperand(2));
4261   case Intrinsic::riscv_fsl:
4262     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4263                        Op.getOperand(2), Op.getOperand(3));
4264   case Intrinsic::riscv_fsr:
4265     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4266                        Op.getOperand(2), Op.getOperand(3));
4267   case Intrinsic::riscv_vmv_x_s:
4268     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4269     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4270                        Op.getOperand(1));
4271   case Intrinsic::riscv_vmv_v_x:
4272     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4273                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4274   case Intrinsic::riscv_vfmv_v_f:
4275     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4276                        Op.getOperand(1), Op.getOperand(2));
4277   case Intrinsic::riscv_vmv_s_x: {
4278     SDValue Scalar = Op.getOperand(2);
4279 
4280     if (Scalar.getValueType().bitsLE(XLenVT)) {
4281       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4282       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4283                          Op.getOperand(1), Scalar, Op.getOperand(3));
4284     }
4285 
4286     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4287 
4288     // This is an i64 value that lives in two scalar registers. We have to
4289     // insert this in a convoluted way. First we build vXi64 splat containing
4290     // the/ two values that we assemble using some bit math. Next we'll use
4291     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4292     // to merge element 0 from our splat into the source vector.
4293     // FIXME: This is probably not the best way to do this, but it is
4294     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4295     // point.
4296     //   sw lo, (a0)
4297     //   sw hi, 4(a0)
4298     //   vlse vX, (a0)
4299     //
4300     //   vid.v      vVid
4301     //   vmseq.vx   mMask, vVid, 0
4302     //   vmerge.vvm vDest, vSrc, vVal, mMask
4303     MVT VT = Op.getSimpleValueType();
4304     SDValue Vec = Op.getOperand(1);
4305     SDValue VL = Op.getOperand(3);
4306 
4307     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4308     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4309                                       DAG.getConstant(0, DL, MVT::i32), VL);
4310 
4311     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4312     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4313     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4314     SDValue SelectCond =
4315         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4316                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4317     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4318                        Vec, VL);
4319   }
4320   case Intrinsic::riscv_vslide1up:
4321   case Intrinsic::riscv_vslide1down:
4322   case Intrinsic::riscv_vslide1up_mask:
4323   case Intrinsic::riscv_vslide1down_mask: {
4324     // We need to special case these when the scalar is larger than XLen.
4325     unsigned NumOps = Op.getNumOperands();
4326     bool IsMasked = NumOps == 7;
4327     unsigned OpOffset = IsMasked ? 1 : 0;
4328     SDValue Scalar = Op.getOperand(2 + OpOffset);
4329     if (Scalar.getValueType().bitsLE(XLenVT))
4330       break;
4331 
4332     // Splatting a sign extended constant is fine.
4333     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4334       if (isInt<32>(CVal->getSExtValue()))
4335         break;
4336 
4337     MVT VT = Op.getSimpleValueType();
4338     assert(VT.getVectorElementType() == MVT::i64 &&
4339            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4340 
4341     // Convert the vector source to the equivalent nxvXi32 vector.
4342     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4343     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4344 
4345     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4346                                    DAG.getConstant(0, DL, XLenVT));
4347     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4348                                    DAG.getConstant(1, DL, XLenVT));
4349 
4350     // Double the VL since we halved SEW.
4351     SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
4352     SDValue I32VL =
4353         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4354 
4355     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4356     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4357 
4358     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4359     // instructions.
4360     if (IntNo == Intrinsic::riscv_vslide1up ||
4361         IntNo == Intrinsic::riscv_vslide1up_mask) {
4362       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4363                         I32Mask, I32VL);
4364       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4365                         I32Mask, I32VL);
4366     } else {
4367       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4368                         I32Mask, I32VL);
4369       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4370                         I32Mask, I32VL);
4371     }
4372 
4373     // Convert back to nxvXi64.
4374     Vec = DAG.getBitcast(VT, Vec);
4375 
4376     if (!IsMasked)
4377       return Vec;
4378 
4379     // Apply mask after the operation.
4380     SDValue Mask = Op.getOperand(NumOps - 3);
4381     SDValue MaskedOff = Op.getOperand(1);
4382     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4383   }
4384   }
4385 
4386   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4387 }
4388 
4389 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4390                                                     SelectionDAG &DAG) const {
4391   unsigned IntNo = Op.getConstantOperandVal(1);
4392   switch (IntNo) {
4393   default:
4394     break;
4395   case Intrinsic::riscv_masked_strided_load: {
4396     SDLoc DL(Op);
4397     MVT XLenVT = Subtarget.getXLenVT();
4398 
4399     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4400     // the selection of the masked intrinsics doesn't do this for us.
4401     SDValue Mask = Op.getOperand(5);
4402     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4403 
4404     MVT VT = Op->getSimpleValueType(0);
4405     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4406 
4407     SDValue PassThru = Op.getOperand(2);
4408     if (!IsUnmasked) {
4409       MVT MaskVT =
4410           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4411       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4412       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4413     }
4414 
4415     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4416 
4417     SDValue IntID = DAG.getTargetConstant(
4418         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4419         XLenVT);
4420 
4421     auto *Load = cast<MemIntrinsicSDNode>(Op);
4422     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4423     if (!IsUnmasked)
4424       Ops.push_back(PassThru);
4425     Ops.push_back(Op.getOperand(3)); // Ptr
4426     Ops.push_back(Op.getOperand(4)); // Stride
4427     if (!IsUnmasked)
4428       Ops.push_back(Mask);
4429     Ops.push_back(VL);
4430     if (!IsUnmasked) {
4431       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4432       Ops.push_back(Policy);
4433     }
4434 
4435     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4436     SDValue Result =
4437         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4438                                 Load->getMemoryVT(), Load->getMemOperand());
4439     SDValue Chain = Result.getValue(1);
4440     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4441     return DAG.getMergeValues({Result, Chain}, DL);
4442   }
4443   }
4444 
4445   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4446 }
4447 
4448 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4449                                                  SelectionDAG &DAG) const {
4450   unsigned IntNo = Op.getConstantOperandVal(1);
4451   switch (IntNo) {
4452   default:
4453     break;
4454   case Intrinsic::riscv_masked_strided_store: {
4455     SDLoc DL(Op);
4456     MVT XLenVT = Subtarget.getXLenVT();
4457 
4458     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4459     // the selection of the masked intrinsics doesn't do this for us.
4460     SDValue Mask = Op.getOperand(5);
4461     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4462 
4463     SDValue Val = Op.getOperand(2);
4464     MVT VT = Val.getSimpleValueType();
4465     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4466 
4467     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4468     if (!IsUnmasked) {
4469       MVT MaskVT =
4470           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4471       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4472     }
4473 
4474     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4475 
4476     SDValue IntID = DAG.getTargetConstant(
4477         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4478         XLenVT);
4479 
4480     auto *Store = cast<MemIntrinsicSDNode>(Op);
4481     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4482     Ops.push_back(Val);
4483     Ops.push_back(Op.getOperand(3)); // Ptr
4484     Ops.push_back(Op.getOperand(4)); // Stride
4485     if (!IsUnmasked)
4486       Ops.push_back(Mask);
4487     Ops.push_back(VL);
4488 
4489     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4490                                    Ops, Store->getMemoryVT(),
4491                                    Store->getMemOperand());
4492   }
4493   }
4494 
4495   return SDValue();
4496 }
4497 
4498 static MVT getLMUL1VT(MVT VT) {
4499   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4500          "Unexpected vector MVT");
4501   return MVT::getScalableVectorVT(
4502       VT.getVectorElementType(),
4503       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4504 }
4505 
4506 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4507   switch (ISDOpcode) {
4508   default:
4509     llvm_unreachable("Unhandled reduction");
4510   case ISD::VECREDUCE_ADD:
4511     return RISCVISD::VECREDUCE_ADD_VL;
4512   case ISD::VECREDUCE_UMAX:
4513     return RISCVISD::VECREDUCE_UMAX_VL;
4514   case ISD::VECREDUCE_SMAX:
4515     return RISCVISD::VECREDUCE_SMAX_VL;
4516   case ISD::VECREDUCE_UMIN:
4517     return RISCVISD::VECREDUCE_UMIN_VL;
4518   case ISD::VECREDUCE_SMIN:
4519     return RISCVISD::VECREDUCE_SMIN_VL;
4520   case ISD::VECREDUCE_AND:
4521     return RISCVISD::VECREDUCE_AND_VL;
4522   case ISD::VECREDUCE_OR:
4523     return RISCVISD::VECREDUCE_OR_VL;
4524   case ISD::VECREDUCE_XOR:
4525     return RISCVISD::VECREDUCE_XOR_VL;
4526   }
4527 }
4528 
4529 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4530                                                          SelectionDAG &DAG,
4531                                                          bool IsVP) const {
4532   SDLoc DL(Op);
4533   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4534   MVT VecVT = Vec.getSimpleValueType();
4535   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4536           Op.getOpcode() == ISD::VECREDUCE_OR ||
4537           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4538           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4539           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4540           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4541          "Unexpected reduction lowering");
4542 
4543   MVT XLenVT = Subtarget.getXLenVT();
4544   assert(Op.getValueType() == XLenVT &&
4545          "Expected reduction output to be legalized to XLenVT");
4546 
4547   MVT ContainerVT = VecVT;
4548   if (VecVT.isFixedLengthVector()) {
4549     ContainerVT = getContainerForFixedLengthVector(VecVT);
4550     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4551   }
4552 
4553   SDValue Mask, VL;
4554   if (IsVP) {
4555     Mask = Op.getOperand(2);
4556     VL = Op.getOperand(3);
4557   } else {
4558     std::tie(Mask, VL) =
4559         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4560   }
4561 
4562   unsigned BaseOpc;
4563   ISD::CondCode CC;
4564   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4565 
4566   switch (Op.getOpcode()) {
4567   default:
4568     llvm_unreachable("Unhandled reduction");
4569   case ISD::VECREDUCE_AND:
4570   case ISD::VP_REDUCE_AND: {
4571     // vcpop ~x == 0
4572     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4573     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4574     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4575     CC = ISD::SETEQ;
4576     BaseOpc = ISD::AND;
4577     break;
4578   }
4579   case ISD::VECREDUCE_OR:
4580   case ISD::VP_REDUCE_OR:
4581     // vcpop x != 0
4582     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4583     CC = ISD::SETNE;
4584     BaseOpc = ISD::OR;
4585     break;
4586   case ISD::VECREDUCE_XOR:
4587   case ISD::VP_REDUCE_XOR: {
4588     // ((vcpop x) & 1) != 0
4589     SDValue One = DAG.getConstant(1, DL, XLenVT);
4590     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4591     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4592     CC = ISD::SETNE;
4593     BaseOpc = ISD::XOR;
4594     break;
4595   }
4596   }
4597 
4598   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4599 
4600   if (!IsVP)
4601     return SetCC;
4602 
4603   // Now include the start value in the operation.
4604   // Note that we must return the start value when no elements are operated
4605   // upon. The vcpop instructions we've emitted in each case above will return
4606   // 0 for an inactive vector, and so we've already received the neutral value:
4607   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4608   // can simply include the start value.
4609   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4610 }
4611 
4612 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4613                                             SelectionDAG &DAG) const {
4614   SDLoc DL(Op);
4615   SDValue Vec = Op.getOperand(0);
4616   EVT VecEVT = Vec.getValueType();
4617 
4618   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4619 
4620   // Due to ordering in legalize types we may have a vector type that needs to
4621   // be split. Do that manually so we can get down to a legal type.
4622   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4623          TargetLowering::TypeSplitVector) {
4624     SDValue Lo, Hi;
4625     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4626     VecEVT = Lo.getValueType();
4627     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4628   }
4629 
4630   // TODO: The type may need to be widened rather than split. Or widened before
4631   // it can be split.
4632   if (!isTypeLegal(VecEVT))
4633     return SDValue();
4634 
4635   MVT VecVT = VecEVT.getSimpleVT();
4636   MVT VecEltVT = VecVT.getVectorElementType();
4637   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4638 
4639   MVT ContainerVT = VecVT;
4640   if (VecVT.isFixedLengthVector()) {
4641     ContainerVT = getContainerForFixedLengthVector(VecVT);
4642     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4643   }
4644 
4645   MVT M1VT = getLMUL1VT(ContainerVT);
4646   MVT XLenVT = Subtarget.getXLenVT();
4647 
4648   SDValue Mask, VL;
4649   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4650 
4651   SDValue NeutralElem =
4652       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4653   SDValue IdentitySplat = lowerScalarSplat(
4654       NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4655   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4656                                   IdentitySplat, Mask, VL);
4657   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4658                              DAG.getConstant(0, DL, XLenVT));
4659   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4660 }
4661 
4662 // Given a reduction op, this function returns the matching reduction opcode,
4663 // the vector SDValue and the scalar SDValue required to lower this to a
4664 // RISCVISD node.
4665 static std::tuple<unsigned, SDValue, SDValue>
4666 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4667   SDLoc DL(Op);
4668   auto Flags = Op->getFlags();
4669   unsigned Opcode = Op.getOpcode();
4670   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4671   switch (Opcode) {
4672   default:
4673     llvm_unreachable("Unhandled reduction");
4674   case ISD::VECREDUCE_FADD: {
4675     // Use positive zero if we can. It is cheaper to materialize.
4676     SDValue Zero =
4677         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
4678     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
4679   }
4680   case ISD::VECREDUCE_SEQ_FADD:
4681     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4682                            Op.getOperand(0));
4683   case ISD::VECREDUCE_FMIN:
4684     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4685                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4686   case ISD::VECREDUCE_FMAX:
4687     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4688                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4689   }
4690 }
4691 
4692 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4693                                               SelectionDAG &DAG) const {
4694   SDLoc DL(Op);
4695   MVT VecEltVT = Op.getSimpleValueType();
4696 
4697   unsigned RVVOpcode;
4698   SDValue VectorVal, ScalarVal;
4699   std::tie(RVVOpcode, VectorVal, ScalarVal) =
4700       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4701   MVT VecVT = VectorVal.getSimpleValueType();
4702 
4703   MVT ContainerVT = VecVT;
4704   if (VecVT.isFixedLengthVector()) {
4705     ContainerVT = getContainerForFixedLengthVector(VecVT);
4706     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4707   }
4708 
4709   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4710   MVT XLenVT = Subtarget.getXLenVT();
4711 
4712   SDValue Mask, VL;
4713   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4714 
4715   SDValue ScalarSplat = lowerScalarSplat(
4716       ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4717   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4718                                   VectorVal, ScalarSplat, Mask, VL);
4719   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4720                      DAG.getConstant(0, DL, XLenVT));
4721 }
4722 
4723 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4724   switch (ISDOpcode) {
4725   default:
4726     llvm_unreachable("Unhandled reduction");
4727   case ISD::VP_REDUCE_ADD:
4728     return RISCVISD::VECREDUCE_ADD_VL;
4729   case ISD::VP_REDUCE_UMAX:
4730     return RISCVISD::VECREDUCE_UMAX_VL;
4731   case ISD::VP_REDUCE_SMAX:
4732     return RISCVISD::VECREDUCE_SMAX_VL;
4733   case ISD::VP_REDUCE_UMIN:
4734     return RISCVISD::VECREDUCE_UMIN_VL;
4735   case ISD::VP_REDUCE_SMIN:
4736     return RISCVISD::VECREDUCE_SMIN_VL;
4737   case ISD::VP_REDUCE_AND:
4738     return RISCVISD::VECREDUCE_AND_VL;
4739   case ISD::VP_REDUCE_OR:
4740     return RISCVISD::VECREDUCE_OR_VL;
4741   case ISD::VP_REDUCE_XOR:
4742     return RISCVISD::VECREDUCE_XOR_VL;
4743   case ISD::VP_REDUCE_FADD:
4744     return RISCVISD::VECREDUCE_FADD_VL;
4745   case ISD::VP_REDUCE_SEQ_FADD:
4746     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4747   case ISD::VP_REDUCE_FMAX:
4748     return RISCVISD::VECREDUCE_FMAX_VL;
4749   case ISD::VP_REDUCE_FMIN:
4750     return RISCVISD::VECREDUCE_FMIN_VL;
4751   }
4752 }
4753 
4754 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4755                                            SelectionDAG &DAG) const {
4756   SDLoc DL(Op);
4757   SDValue Vec = Op.getOperand(1);
4758   EVT VecEVT = Vec.getValueType();
4759 
4760   // TODO: The type may need to be widened rather than split. Or widened before
4761   // it can be split.
4762   if (!isTypeLegal(VecEVT))
4763     return SDValue();
4764 
4765   MVT VecVT = VecEVT.getSimpleVT();
4766   MVT VecEltVT = VecVT.getVectorElementType();
4767   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
4768 
4769   MVT ContainerVT = VecVT;
4770   if (VecVT.isFixedLengthVector()) {
4771     ContainerVT = getContainerForFixedLengthVector(VecVT);
4772     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4773   }
4774 
4775   SDValue VL = Op.getOperand(3);
4776   SDValue Mask = Op.getOperand(2);
4777 
4778   MVT M1VT = getLMUL1VT(ContainerVT);
4779   MVT XLenVT = Subtarget.getXLenVT();
4780   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
4781 
4782   SDValue StartSplat =
4783       lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT,
4784                        DL, DAG, Subtarget);
4785   SDValue Reduction =
4786       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
4787   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
4788                              DAG.getConstant(0, DL, XLenVT));
4789   if (!VecVT.isInteger())
4790     return Elt0;
4791   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4792 }
4793 
4794 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4795                                                    SelectionDAG &DAG) const {
4796   SDValue Vec = Op.getOperand(0);
4797   SDValue SubVec = Op.getOperand(1);
4798   MVT VecVT = Vec.getSimpleValueType();
4799   MVT SubVecVT = SubVec.getSimpleValueType();
4800 
4801   SDLoc DL(Op);
4802   MVT XLenVT = Subtarget.getXLenVT();
4803   unsigned OrigIdx = Op.getConstantOperandVal(2);
4804   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4805 
4806   // We don't have the ability to slide mask vectors up indexed by their i1
4807   // elements; the smallest we can do is i8. Often we are able to bitcast to
4808   // equivalent i8 vectors. Note that when inserting a fixed-length vector
4809   // into a scalable one, we might not necessarily have enough scalable
4810   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
4811   if (SubVecVT.getVectorElementType() == MVT::i1 &&
4812       (OrigIdx != 0 || !Vec.isUndef())) {
4813     if (VecVT.getVectorMinNumElements() >= 8 &&
4814         SubVecVT.getVectorMinNumElements() >= 8) {
4815       assert(OrigIdx % 8 == 0 && "Invalid index");
4816       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4817              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4818              "Unexpected mask vector lowering");
4819       OrigIdx /= 8;
4820       SubVecVT =
4821           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4822                            SubVecVT.isScalableVector());
4823       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4824                                VecVT.isScalableVector());
4825       Vec = DAG.getBitcast(VecVT, Vec);
4826       SubVec = DAG.getBitcast(SubVecVT, SubVec);
4827     } else {
4828       // We can't slide this mask vector up indexed by its i1 elements.
4829       // This poses a problem when we wish to insert a scalable vector which
4830       // can't be re-expressed as a larger type. Just choose the slow path and
4831       // extend to a larger type, then truncate back down.
4832       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4833       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4834       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4835       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4836       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4837                         Op.getOperand(2));
4838       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4839       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4840     }
4841   }
4842 
4843   // If the subvector vector is a fixed-length type, we cannot use subregister
4844   // manipulation to simplify the codegen; we don't know which register of a
4845   // LMUL group contains the specific subvector as we only know the minimum
4846   // register size. Therefore we must slide the vector group up the full
4847   // amount.
4848   if (SubVecVT.isFixedLengthVector()) {
4849     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
4850       return Op;
4851     MVT ContainerVT = VecVT;
4852     if (VecVT.isFixedLengthVector()) {
4853       ContainerVT = getContainerForFixedLengthVector(VecVT);
4854       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4855     }
4856     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4857                          DAG.getUNDEF(ContainerVT), SubVec,
4858                          DAG.getConstant(0, DL, XLenVT));
4859     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
4860       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
4861       return DAG.getBitcast(Op.getValueType(), SubVec);
4862     }
4863     SDValue Mask =
4864         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4865     // Set the vector length to only the number of elements we care about. Note
4866     // that for slideup this includes the offset.
4867     SDValue VL =
4868         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4869     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4870     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4871                                   SubVec, SlideupAmt, Mask, VL);
4872     if (VecVT.isFixedLengthVector())
4873       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4874     return DAG.getBitcast(Op.getValueType(), Slideup);
4875   }
4876 
4877   unsigned SubRegIdx, RemIdx;
4878   std::tie(SubRegIdx, RemIdx) =
4879       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4880           VecVT, SubVecVT, OrigIdx, TRI);
4881 
4882   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4883   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4884                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4885                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4886 
4887   // 1. If the Idx has been completely eliminated and this subvector's size is
4888   // a vector register or a multiple thereof, or the surrounding elements are
4889   // undef, then this is a subvector insert which naturally aligns to a vector
4890   // register. These can easily be handled using subregister manipulation.
4891   // 2. If the subvector is smaller than a vector register, then the insertion
4892   // must preserve the undisturbed elements of the register. We do this by
4893   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4894   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4895   // subvector within the vector register, and an INSERT_SUBVECTOR of that
4896   // LMUL=1 type back into the larger vector (resolving to another subregister
4897   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
4898   // to avoid allocating a large register group to hold our subvector.
4899   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4900     return Op;
4901 
4902   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
4903   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
4904   // (in our case undisturbed). This means we can set up a subvector insertion
4905   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
4906   // size of the subvector.
4907   MVT InterSubVT = VecVT;
4908   SDValue AlignedExtract = Vec;
4909   unsigned AlignedIdx = OrigIdx - RemIdx;
4910   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4911     InterSubVT = getLMUL1VT(VecVT);
4912     // Extract a subvector equal to the nearest full vector register type. This
4913     // should resolve to a EXTRACT_SUBREG instruction.
4914     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4915                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4916   }
4917 
4918   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4919   // For scalable vectors this must be further multiplied by vscale.
4920   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4921 
4922   SDValue Mask, VL;
4923   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4924 
4925   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4926   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4927   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4928   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4929 
4930   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4931                        DAG.getUNDEF(InterSubVT), SubVec,
4932                        DAG.getConstant(0, DL, XLenVT));
4933 
4934   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4935                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4936 
4937   // If required, insert this subvector back into the correct vector register.
4938   // This should resolve to an INSERT_SUBREG instruction.
4939   if (VecVT.bitsGT(InterSubVT))
4940     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4941                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4942 
4943   // We might have bitcast from a mask type: cast back to the original type if
4944   // required.
4945   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4946 }
4947 
4948 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4949                                                     SelectionDAG &DAG) const {
4950   SDValue Vec = Op.getOperand(0);
4951   MVT SubVecVT = Op.getSimpleValueType();
4952   MVT VecVT = Vec.getSimpleValueType();
4953 
4954   SDLoc DL(Op);
4955   MVT XLenVT = Subtarget.getXLenVT();
4956   unsigned OrigIdx = Op.getConstantOperandVal(1);
4957   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4958 
4959   // We don't have the ability to slide mask vectors down indexed by their i1
4960   // elements; the smallest we can do is i8. Often we are able to bitcast to
4961   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4962   // from a scalable one, we might not necessarily have enough scalable
4963   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4964   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4965     if (VecVT.getVectorMinNumElements() >= 8 &&
4966         SubVecVT.getVectorMinNumElements() >= 8) {
4967       assert(OrigIdx % 8 == 0 && "Invalid index");
4968       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4969              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4970              "Unexpected mask vector lowering");
4971       OrigIdx /= 8;
4972       SubVecVT =
4973           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4974                            SubVecVT.isScalableVector());
4975       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4976                                VecVT.isScalableVector());
4977       Vec = DAG.getBitcast(VecVT, Vec);
4978     } else {
4979       // We can't slide this mask vector down, indexed by its i1 elements.
4980       // This poses a problem when we wish to extract a scalable vector which
4981       // can't be re-expressed as a larger type. Just choose the slow path and
4982       // extend to a larger type, then truncate back down.
4983       // TODO: We could probably improve this when extracting certain fixed
4984       // from fixed, where we can extract as i8 and shift the correct element
4985       // right to reach the desired subvector?
4986       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4987       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4988       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4989       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4990                         Op.getOperand(1));
4991       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4992       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4993     }
4994   }
4995 
4996   // If the subvector vector is a fixed-length type, we cannot use subregister
4997   // manipulation to simplify the codegen; we don't know which register of a
4998   // LMUL group contains the specific subvector as we only know the minimum
4999   // register size. Therefore we must slide the vector group down the full
5000   // amount.
5001   if (SubVecVT.isFixedLengthVector()) {
5002     // With an index of 0 this is a cast-like subvector, which can be performed
5003     // with subregister operations.
5004     if (OrigIdx == 0)
5005       return Op;
5006     MVT ContainerVT = VecVT;
5007     if (VecVT.isFixedLengthVector()) {
5008       ContainerVT = getContainerForFixedLengthVector(VecVT);
5009       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5010     }
5011     SDValue Mask =
5012         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5013     // Set the vector length to only the number of elements we care about. This
5014     // avoids sliding down elements we're going to discard straight away.
5015     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5016     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5017     SDValue Slidedown =
5018         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5019                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5020     // Now we can use a cast-like subvector extract to get the result.
5021     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5022                             DAG.getConstant(0, DL, XLenVT));
5023     return DAG.getBitcast(Op.getValueType(), Slidedown);
5024   }
5025 
5026   unsigned SubRegIdx, RemIdx;
5027   std::tie(SubRegIdx, RemIdx) =
5028       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5029           VecVT, SubVecVT, OrigIdx, TRI);
5030 
5031   // If the Idx has been completely eliminated then this is a subvector extract
5032   // which naturally aligns to a vector register. These can easily be handled
5033   // using subregister manipulation.
5034   if (RemIdx == 0)
5035     return Op;
5036 
5037   // Else we must shift our vector register directly to extract the subvector.
5038   // Do this using VSLIDEDOWN.
5039 
5040   // If the vector type is an LMUL-group type, extract a subvector equal to the
5041   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5042   // instruction.
5043   MVT InterSubVT = VecVT;
5044   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5045     InterSubVT = getLMUL1VT(VecVT);
5046     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5047                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5048   }
5049 
5050   // Slide this vector register down by the desired number of elements in order
5051   // to place the desired subvector starting at element 0.
5052   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5053   // For scalable vectors this must be further multiplied by vscale.
5054   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5055 
5056   SDValue Mask, VL;
5057   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5058   SDValue Slidedown =
5059       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5060                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5061 
5062   // Now the vector is in the right position, extract our final subvector. This
5063   // should resolve to a COPY.
5064   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5065                           DAG.getConstant(0, DL, XLenVT));
5066 
5067   // We might have bitcast from a mask type: cast back to the original type if
5068   // required.
5069   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5070 }
5071 
5072 // Lower step_vector to the vid instruction. Any non-identity step value must
5073 // be accounted for my manual expansion.
5074 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5075                                               SelectionDAG &DAG) const {
5076   SDLoc DL(Op);
5077   MVT VT = Op.getSimpleValueType();
5078   MVT XLenVT = Subtarget.getXLenVT();
5079   SDValue Mask, VL;
5080   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5081   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5082   uint64_t StepValImm = Op.getConstantOperandVal(0);
5083   if (StepValImm != 1) {
5084     if (isPowerOf2_64(StepValImm)) {
5085       SDValue StepVal =
5086           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
5087                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5088       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5089     } else {
5090       SDValue StepVal = lowerScalarSplat(
5091           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
5092           DL, DAG, Subtarget);
5093       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5094     }
5095   }
5096   return StepVec;
5097 }
5098 
5099 // Implement vector_reverse using vrgather.vv with indices determined by
5100 // subtracting the id of each element from (VLMAX-1). This will convert
5101 // the indices like so:
5102 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5103 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5104 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5105                                                  SelectionDAG &DAG) const {
5106   SDLoc DL(Op);
5107   MVT VecVT = Op.getSimpleValueType();
5108   unsigned EltSize = VecVT.getScalarSizeInBits();
5109   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5110 
5111   unsigned MaxVLMAX = 0;
5112   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5113   if (VectorBitsMax != 0)
5114     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5115 
5116   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5117   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5118 
5119   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5120   // to use vrgatherei16.vv.
5121   // TODO: It's also possible to use vrgatherei16.vv for other types to
5122   // decrease register width for the index calculation.
5123   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5124     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5125     // Reverse each half, then reassemble them in reverse order.
5126     // NOTE: It's also possible that after splitting that VLMAX no longer
5127     // requires vrgatherei16.vv.
5128     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5129       SDValue Lo, Hi;
5130       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5131       EVT LoVT, HiVT;
5132       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5133       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5134       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5135       // Reassemble the low and high pieces reversed.
5136       // FIXME: This is a CONCAT_VECTORS.
5137       SDValue Res =
5138           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5139                       DAG.getIntPtrConstant(0, DL));
5140       return DAG.getNode(
5141           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5142           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5143     }
5144 
5145     // Just promote the int type to i16 which will double the LMUL.
5146     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5147     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5148   }
5149 
5150   MVT XLenVT = Subtarget.getXLenVT();
5151   SDValue Mask, VL;
5152   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5153 
5154   // Calculate VLMAX-1 for the desired SEW.
5155   unsigned MinElts = VecVT.getVectorMinNumElements();
5156   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5157                               DAG.getConstant(MinElts, DL, XLenVT));
5158   SDValue VLMinus1 =
5159       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5160 
5161   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5162   bool IsRV32E64 =
5163       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5164   SDValue SplatVL;
5165   if (!IsRV32E64)
5166     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5167   else
5168     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
5169 
5170   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5171   SDValue Indices =
5172       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5173 
5174   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5175 }
5176 
5177 SDValue
5178 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5179                                                      SelectionDAG &DAG) const {
5180   SDLoc DL(Op);
5181   auto *Load = cast<LoadSDNode>(Op);
5182 
5183   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5184                                         Load->getMemoryVT(),
5185                                         *Load->getMemOperand()) &&
5186          "Expecting a correctly-aligned load");
5187 
5188   MVT VT = Op.getSimpleValueType();
5189   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5190 
5191   SDValue VL =
5192       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5193 
5194   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5195   SDValue NewLoad = DAG.getMemIntrinsicNode(
5196       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5197       Load->getMemoryVT(), Load->getMemOperand());
5198 
5199   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5200   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5201 }
5202 
5203 SDValue
5204 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5205                                                       SelectionDAG &DAG) const {
5206   SDLoc DL(Op);
5207   auto *Store = cast<StoreSDNode>(Op);
5208 
5209   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5210                                         Store->getMemoryVT(),
5211                                         *Store->getMemOperand()) &&
5212          "Expecting a correctly-aligned store");
5213 
5214   SDValue StoreVal = Store->getValue();
5215   MVT VT = StoreVal.getSimpleValueType();
5216 
5217   // If the size less than a byte, we need to pad with zeros to make a byte.
5218   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5219     VT = MVT::v8i1;
5220     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5221                            DAG.getConstant(0, DL, VT), StoreVal,
5222                            DAG.getIntPtrConstant(0, DL));
5223   }
5224 
5225   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5226 
5227   SDValue VL =
5228       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5229 
5230   SDValue NewValue =
5231       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5232   return DAG.getMemIntrinsicNode(
5233       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5234       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5235       Store->getMemoryVT(), Store->getMemOperand());
5236 }
5237 
5238 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5239                                              SelectionDAG &DAG) const {
5240   SDLoc DL(Op);
5241   MVT VT = Op.getSimpleValueType();
5242 
5243   const auto *MemSD = cast<MemSDNode>(Op);
5244   EVT MemVT = MemSD->getMemoryVT();
5245   MachineMemOperand *MMO = MemSD->getMemOperand();
5246   SDValue Chain = MemSD->getChain();
5247   SDValue BasePtr = MemSD->getBasePtr();
5248 
5249   SDValue Mask, PassThru, VL;
5250   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5251     Mask = VPLoad->getMask();
5252     PassThru = DAG.getUNDEF(VT);
5253     VL = VPLoad->getVectorLength();
5254   } else {
5255     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5256     Mask = MLoad->getMask();
5257     PassThru = MLoad->getPassThru();
5258   }
5259 
5260   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5261 
5262   MVT XLenVT = Subtarget.getXLenVT();
5263 
5264   MVT ContainerVT = VT;
5265   if (VT.isFixedLengthVector()) {
5266     ContainerVT = getContainerForFixedLengthVector(VT);
5267     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5268     if (!IsUnmasked) {
5269       MVT MaskVT =
5270           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5271       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5272     }
5273   }
5274 
5275   if (!VL)
5276     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5277 
5278   unsigned IntID =
5279       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5280   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5281   if (!IsUnmasked)
5282     Ops.push_back(PassThru);
5283   Ops.push_back(BasePtr);
5284   if (!IsUnmasked)
5285     Ops.push_back(Mask);
5286   Ops.push_back(VL);
5287   if (!IsUnmasked)
5288     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5289 
5290   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5291 
5292   SDValue Result =
5293       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5294   Chain = Result.getValue(1);
5295 
5296   if (VT.isFixedLengthVector())
5297     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5298 
5299   return DAG.getMergeValues({Result, Chain}, DL);
5300 }
5301 
5302 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5303                                               SelectionDAG &DAG) const {
5304   SDLoc DL(Op);
5305 
5306   const auto *MemSD = cast<MemSDNode>(Op);
5307   EVT MemVT = MemSD->getMemoryVT();
5308   MachineMemOperand *MMO = MemSD->getMemOperand();
5309   SDValue Chain = MemSD->getChain();
5310   SDValue BasePtr = MemSD->getBasePtr();
5311   SDValue Val, Mask, VL;
5312 
5313   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5314     Val = VPStore->getValue();
5315     Mask = VPStore->getMask();
5316     VL = VPStore->getVectorLength();
5317   } else {
5318     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5319     Val = MStore->getValue();
5320     Mask = MStore->getMask();
5321   }
5322 
5323   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5324 
5325   MVT VT = Val.getSimpleValueType();
5326   MVT XLenVT = Subtarget.getXLenVT();
5327 
5328   MVT ContainerVT = VT;
5329   if (VT.isFixedLengthVector()) {
5330     ContainerVT = getContainerForFixedLengthVector(VT);
5331 
5332     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5333     if (!IsUnmasked) {
5334       MVT MaskVT =
5335           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5336       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5337     }
5338   }
5339 
5340   if (!VL)
5341     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5342 
5343   unsigned IntID =
5344       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5345   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5346   Ops.push_back(Val);
5347   Ops.push_back(BasePtr);
5348   if (!IsUnmasked)
5349     Ops.push_back(Mask);
5350   Ops.push_back(VL);
5351 
5352   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5353                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5354 }
5355 
5356 SDValue
5357 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5358                                                       SelectionDAG &DAG) const {
5359   MVT InVT = Op.getOperand(0).getSimpleValueType();
5360   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5361 
5362   MVT VT = Op.getSimpleValueType();
5363 
5364   SDValue Op1 =
5365       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5366   SDValue Op2 =
5367       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5368 
5369   SDLoc DL(Op);
5370   SDValue VL =
5371       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5372 
5373   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5374   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5375 
5376   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5377                             Op.getOperand(2), Mask, VL);
5378 
5379   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5380 }
5381 
5382 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5383     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5384   MVT VT = Op.getSimpleValueType();
5385 
5386   if (VT.getVectorElementType() == MVT::i1)
5387     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5388 
5389   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5390 }
5391 
5392 SDValue
5393 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5394                                                       SelectionDAG &DAG) const {
5395   unsigned Opc;
5396   switch (Op.getOpcode()) {
5397   default: llvm_unreachable("Unexpected opcode!");
5398   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5399   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5400   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5401   }
5402 
5403   return lowerToScalableOp(Op, DAG, Opc);
5404 }
5405 
5406 // Lower vector ABS to smax(X, sub(0, X)).
5407 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5408   SDLoc DL(Op);
5409   MVT VT = Op.getSimpleValueType();
5410   SDValue X = Op.getOperand(0);
5411 
5412   assert(VT.isFixedLengthVector() && "Unexpected type");
5413 
5414   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5415   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5416 
5417   SDValue Mask, VL;
5418   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5419 
5420   SDValue SplatZero =
5421       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5422                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5423   SDValue NegX =
5424       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5425   SDValue Max =
5426       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5427 
5428   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5429 }
5430 
5431 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5432     SDValue Op, SelectionDAG &DAG) const {
5433   SDLoc DL(Op);
5434   MVT VT = Op.getSimpleValueType();
5435   SDValue Mag = Op.getOperand(0);
5436   SDValue Sign = Op.getOperand(1);
5437   assert(Mag.getValueType() == Sign.getValueType() &&
5438          "Can only handle COPYSIGN with matching types.");
5439 
5440   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5441   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5442   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5443 
5444   SDValue Mask, VL;
5445   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5446 
5447   SDValue CopySign =
5448       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5449 
5450   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5451 }
5452 
5453 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5454     SDValue Op, SelectionDAG &DAG) const {
5455   MVT VT = Op.getSimpleValueType();
5456   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5457 
5458   MVT I1ContainerVT =
5459       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5460 
5461   SDValue CC =
5462       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5463   SDValue Op1 =
5464       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5465   SDValue Op2 =
5466       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5467 
5468   SDLoc DL(Op);
5469   SDValue Mask, VL;
5470   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5471 
5472   SDValue Select =
5473       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5474 
5475   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5476 }
5477 
5478 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5479                                                unsigned NewOpc,
5480                                                bool HasMask) const {
5481   MVT VT = Op.getSimpleValueType();
5482   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5483 
5484   // Create list of operands by converting existing ones to scalable types.
5485   SmallVector<SDValue, 6> Ops;
5486   for (const SDValue &V : Op->op_values()) {
5487     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5488 
5489     // Pass through non-vector operands.
5490     if (!V.getValueType().isVector()) {
5491       Ops.push_back(V);
5492       continue;
5493     }
5494 
5495     // "cast" fixed length vector to a scalable vector.
5496     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5497            "Only fixed length vectors are supported!");
5498     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5499   }
5500 
5501   SDLoc DL(Op);
5502   SDValue Mask, VL;
5503   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5504   if (HasMask)
5505     Ops.push_back(Mask);
5506   Ops.push_back(VL);
5507 
5508   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5509   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5510 }
5511 
5512 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5513 // * Operands of each node are assumed to be in the same order.
5514 // * The EVL operand is promoted from i32 to i64 on RV64.
5515 // * Fixed-length vectors are converted to their scalable-vector container
5516 //   types.
5517 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5518                                        unsigned RISCVISDOpc) const {
5519   SDLoc DL(Op);
5520   MVT VT = Op.getSimpleValueType();
5521   SmallVector<SDValue, 4> Ops;
5522 
5523   for (const auto &OpIdx : enumerate(Op->ops())) {
5524     SDValue V = OpIdx.value();
5525     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5526     // Pass through operands which aren't fixed-length vectors.
5527     if (!V.getValueType().isFixedLengthVector()) {
5528       Ops.push_back(V);
5529       continue;
5530     }
5531     // "cast" fixed length vector to a scalable vector.
5532     MVT OpVT = V.getSimpleValueType();
5533     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5534     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5535            "Only fixed length vectors are supported!");
5536     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5537   }
5538 
5539   if (!VT.isFixedLengthVector())
5540     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5541 
5542   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5543 
5544   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5545 
5546   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5547 }
5548 
5549 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5550                                             unsigned MaskOpc,
5551                                             unsigned VecOpc) const {
5552   MVT VT = Op.getSimpleValueType();
5553   if (VT.getVectorElementType() != MVT::i1)
5554     return lowerVPOp(Op, DAG, VecOpc);
5555 
5556   // It is safe to drop mask parameter as masked-off elements are undef.
5557   SDValue Op1 = Op->getOperand(0);
5558   SDValue Op2 = Op->getOperand(1);
5559   SDValue VL = Op->getOperand(3);
5560 
5561   MVT ContainerVT = VT;
5562   const bool IsFixed = VT.isFixedLengthVector();
5563   if (IsFixed) {
5564     ContainerVT = getContainerForFixedLengthVector(VT);
5565     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
5566     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
5567   }
5568 
5569   SDLoc DL(Op);
5570   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
5571   if (!IsFixed)
5572     return Val;
5573   return convertFromScalableVector(VT, Val, DAG, Subtarget);
5574 }
5575 
5576 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5577 // matched to a RVV indexed load. The RVV indexed load instructions only
5578 // support the "unsigned unscaled" addressing mode; indices are implicitly
5579 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5580 // signed or scaled indexing is extended to the XLEN value type and scaled
5581 // accordingly.
5582 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5583                                                SelectionDAG &DAG) const {
5584   SDLoc DL(Op);
5585   MVT VT = Op.getSimpleValueType();
5586 
5587   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5588   EVT MemVT = MemSD->getMemoryVT();
5589   MachineMemOperand *MMO = MemSD->getMemOperand();
5590   SDValue Chain = MemSD->getChain();
5591   SDValue BasePtr = MemSD->getBasePtr();
5592 
5593   ISD::LoadExtType LoadExtType;
5594   SDValue Index, Mask, PassThru, VL;
5595 
5596   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5597     Index = VPGN->getIndex();
5598     Mask = VPGN->getMask();
5599     PassThru = DAG.getUNDEF(VT);
5600     VL = VPGN->getVectorLength();
5601     // VP doesn't support extending loads.
5602     LoadExtType = ISD::NON_EXTLOAD;
5603   } else {
5604     // Else it must be a MGATHER.
5605     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5606     Index = MGN->getIndex();
5607     Mask = MGN->getMask();
5608     PassThru = MGN->getPassThru();
5609     LoadExtType = MGN->getExtensionType();
5610   }
5611 
5612   MVT IndexVT = Index.getSimpleValueType();
5613   MVT XLenVT = Subtarget.getXLenVT();
5614 
5615   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5616          "Unexpected VTs!");
5617   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5618   // Targets have to explicitly opt-in for extending vector loads.
5619   assert(LoadExtType == ISD::NON_EXTLOAD &&
5620          "Unexpected extending MGATHER/VP_GATHER");
5621   (void)LoadExtType;
5622 
5623   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5624   // the selection of the masked intrinsics doesn't do this for us.
5625   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5626 
5627   MVT ContainerVT = VT;
5628   if (VT.isFixedLengthVector()) {
5629     // We need to use the larger of the result and index type to determine the
5630     // scalable type to use so we don't increase LMUL for any operand/result.
5631     if (VT.bitsGE(IndexVT)) {
5632       ContainerVT = getContainerForFixedLengthVector(VT);
5633       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5634                                  ContainerVT.getVectorElementCount());
5635     } else {
5636       IndexVT = getContainerForFixedLengthVector(IndexVT);
5637       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
5638                                      IndexVT.getVectorElementCount());
5639     }
5640 
5641     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5642 
5643     if (!IsUnmasked) {
5644       MVT MaskVT =
5645           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5646       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5647       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5648     }
5649   }
5650 
5651   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
5652       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5653       Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
5654   }
5655 
5656   if (!VL)
5657     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5658 
5659   unsigned IntID =
5660       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
5661   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5662   if (!IsUnmasked)
5663     Ops.push_back(PassThru);
5664   Ops.push_back(BasePtr);
5665   Ops.push_back(Index);
5666   if (!IsUnmasked)
5667     Ops.push_back(Mask);
5668   Ops.push_back(VL);
5669   if (!IsUnmasked)
5670     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5671 
5672   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5673   SDValue Result =
5674       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5675   Chain = Result.getValue(1);
5676 
5677   if (VT.isFixedLengthVector())
5678     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5679 
5680   return DAG.getMergeValues({Result, Chain}, DL);
5681 }
5682 
5683 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
5684 // matched to a RVV indexed store. The RVV indexed store instructions only
5685 // support the "unsigned unscaled" addressing mode; indices are implicitly
5686 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5687 // signed or scaled indexing is extended to the XLEN value type and scaled
5688 // accordingly.
5689 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
5690                                                 SelectionDAG &DAG) const {
5691   SDLoc DL(Op);
5692   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5693   EVT MemVT = MemSD->getMemoryVT();
5694   MachineMemOperand *MMO = MemSD->getMemOperand();
5695   SDValue Chain = MemSD->getChain();
5696   SDValue BasePtr = MemSD->getBasePtr();
5697 
5698   bool IsTruncatingStore = false;
5699   SDValue Index, Mask, Val, VL;
5700 
5701   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
5702     Index = VPSN->getIndex();
5703     Mask = VPSN->getMask();
5704     Val = VPSN->getValue();
5705     VL = VPSN->getVectorLength();
5706     // VP doesn't support truncating stores.
5707     IsTruncatingStore = false;
5708   } else {
5709     // Else it must be a MSCATTER.
5710     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
5711     Index = MSN->getIndex();
5712     Mask = MSN->getMask();
5713     Val = MSN->getValue();
5714     IsTruncatingStore = MSN->isTruncatingStore();
5715   }
5716 
5717   MVT VT = Val.getSimpleValueType();
5718   MVT IndexVT = Index.getSimpleValueType();
5719   MVT XLenVT = Subtarget.getXLenVT();
5720 
5721   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5722          "Unexpected VTs!");
5723   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5724   // Targets have to explicitly opt-in for extending vector loads and
5725   // truncating vector stores.
5726   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
5727   (void)IsTruncatingStore;
5728 
5729   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5730   // the selection of the masked intrinsics doesn't do this for us.
5731   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5732 
5733   MVT ContainerVT = VT;
5734   if (VT.isFixedLengthVector()) {
5735     // We need to use the larger of the value and index type to determine the
5736     // scalable type to use so we don't increase LMUL for any operand/result.
5737     if (VT.bitsGE(IndexVT)) {
5738       ContainerVT = getContainerForFixedLengthVector(VT);
5739       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5740                                  ContainerVT.getVectorElementCount());
5741     } else {
5742       IndexVT = getContainerForFixedLengthVector(IndexVT);
5743       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
5744                                      IndexVT.getVectorElementCount());
5745     }
5746 
5747     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5748     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5749 
5750     if (!IsUnmasked) {
5751       MVT MaskVT =
5752           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5753       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5754     }
5755   }
5756 
5757   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
5758       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5759       Index = DAG.getNode(ISD::TRUNCATE, DL, IndexVT, Index);
5760   }
5761 
5762   if (!VL)
5763     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5764 
5765   unsigned IntID =
5766       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
5767   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5768   Ops.push_back(Val);
5769   Ops.push_back(BasePtr);
5770   Ops.push_back(Index);
5771   if (!IsUnmasked)
5772     Ops.push_back(Mask);
5773   Ops.push_back(VL);
5774 
5775   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5776                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5777 }
5778 
5779 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
5780                                                SelectionDAG &DAG) const {
5781   const MVT XLenVT = Subtarget.getXLenVT();
5782   SDLoc DL(Op);
5783   SDValue Chain = Op->getOperand(0);
5784   SDValue SysRegNo = DAG.getTargetConstant(
5785       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5786   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
5787   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
5788 
5789   // Encoding used for rounding mode in RISCV differs from that used in
5790   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
5791   // table, which consists of a sequence of 4-bit fields, each representing
5792   // corresponding FLT_ROUNDS mode.
5793   static const int Table =
5794       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
5795       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
5796       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
5797       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
5798       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
5799 
5800   SDValue Shift =
5801       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
5802   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5803                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5804   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5805                                DAG.getConstant(7, DL, XLenVT));
5806 
5807   return DAG.getMergeValues({Masked, Chain}, DL);
5808 }
5809 
5810 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
5811                                                SelectionDAG &DAG) const {
5812   const MVT XLenVT = Subtarget.getXLenVT();
5813   SDLoc DL(Op);
5814   SDValue Chain = Op->getOperand(0);
5815   SDValue RMValue = Op->getOperand(1);
5816   SDValue SysRegNo = DAG.getTargetConstant(
5817       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5818 
5819   // Encoding used for rounding mode in RISCV differs from that used in
5820   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
5821   // a table, which consists of a sequence of 4-bit fields, each representing
5822   // corresponding RISCV mode.
5823   static const unsigned Table =
5824       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
5825       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
5826       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
5827       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
5828       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
5829 
5830   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
5831                               DAG.getConstant(2, DL, XLenVT));
5832   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5833                                 DAG.getConstant(Table, DL, XLenVT), Shift);
5834   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5835                         DAG.getConstant(0x7, DL, XLenVT));
5836   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
5837                      RMValue);
5838 }
5839 
5840 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
5841   switch (IntNo) {
5842   default:
5843     llvm_unreachable("Unexpected Intrinsic");
5844   case Intrinsic::riscv_grev:
5845     return RISCVISD::GREVW;
5846   case Intrinsic::riscv_gorc:
5847     return RISCVISD::GORCW;
5848   case Intrinsic::riscv_bcompress:
5849     return RISCVISD::BCOMPRESSW;
5850   case Intrinsic::riscv_bdecompress:
5851     return RISCVISD::BDECOMPRESSW;
5852   case Intrinsic::riscv_bfp:
5853     return RISCVISD::BFPW;
5854   case Intrinsic::riscv_fsl:
5855     return RISCVISD::FSLW;
5856   case Intrinsic::riscv_fsr:
5857     return RISCVISD::FSRW;
5858   }
5859 }
5860 
5861 // Converts the given intrinsic to a i64 operation with any extension.
5862 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
5863                                          unsigned IntNo) {
5864   SDLoc DL(N);
5865   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
5866   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5867   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5868   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
5869   // ReplaceNodeResults requires we maintain the same type for the return value.
5870   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5871 }
5872 
5873 // Returns the opcode of the target-specific SDNode that implements the 32-bit
5874 // form of the given Opcode.
5875 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
5876   switch (Opcode) {
5877   default:
5878     llvm_unreachable("Unexpected opcode");
5879   case ISD::SHL:
5880     return RISCVISD::SLLW;
5881   case ISD::SRA:
5882     return RISCVISD::SRAW;
5883   case ISD::SRL:
5884     return RISCVISD::SRLW;
5885   case ISD::SDIV:
5886     return RISCVISD::DIVW;
5887   case ISD::UDIV:
5888     return RISCVISD::DIVUW;
5889   case ISD::UREM:
5890     return RISCVISD::REMUW;
5891   case ISD::ROTL:
5892     return RISCVISD::ROLW;
5893   case ISD::ROTR:
5894     return RISCVISD::RORW;
5895   case RISCVISD::GREV:
5896     return RISCVISD::GREVW;
5897   case RISCVISD::GORC:
5898     return RISCVISD::GORCW;
5899   }
5900 }
5901 
5902 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
5903 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
5904 // otherwise be promoted to i64, making it difficult to select the
5905 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
5906 // type i8/i16/i32 is lost.
5907 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
5908                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
5909   SDLoc DL(N);
5910   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5911   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
5912   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
5913   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5914   // ReplaceNodeResults requires we maintain the same type for the return value.
5915   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5916 }
5917 
5918 // Converts the given 32-bit operation to a i64 operation with signed extension
5919 // semantic to reduce the signed extension instructions.
5920 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
5921   SDLoc DL(N);
5922   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5923   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5924   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
5925   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5926                                DAG.getValueType(MVT::i32));
5927   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
5928 }
5929 
5930 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
5931                                              SmallVectorImpl<SDValue> &Results,
5932                                              SelectionDAG &DAG) const {
5933   SDLoc DL(N);
5934   switch (N->getOpcode()) {
5935   default:
5936     llvm_unreachable("Don't know how to custom type legalize this operation!");
5937   case ISD::STRICT_FP_TO_SINT:
5938   case ISD::STRICT_FP_TO_UINT:
5939   case ISD::FP_TO_SINT:
5940   case ISD::FP_TO_UINT: {
5941     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5942            "Unexpected custom legalisation");
5943     bool IsStrict = N->isStrictFPOpcode();
5944     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
5945                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
5946     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
5947     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
5948         TargetLowering::TypeSoftenFloat) {
5949       if (!isTypeLegal(Op0.getValueType()))
5950         return;
5951       if (IsStrict) {
5952         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
5953                                 : RISCVISD::STRICT_FCVT_WU_RV64;
5954         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
5955         SDValue Res = DAG.getNode(
5956             Opc, DL, VTs, N->getOperand(0), Op0,
5957             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
5958         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5959         Results.push_back(Res.getValue(1));
5960         return;
5961       }
5962       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
5963       SDValue Res =
5964           DAG.getNode(Opc, DL, MVT::i64, Op0,
5965                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
5966       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5967       return;
5968     }
5969     // If the FP type needs to be softened, emit a library call using the 'si'
5970     // version. If we left it to default legalization we'd end up with 'di'. If
5971     // the FP type doesn't need to be softened just let generic type
5972     // legalization promote the result type.
5973     RTLIB::Libcall LC;
5974     if (IsSigned)
5975       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
5976     else
5977       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
5978     MakeLibCallOptions CallOptions;
5979     EVT OpVT = Op0.getValueType();
5980     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5981     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5982     SDValue Result;
5983     std::tie(Result, Chain) =
5984         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5985     Results.push_back(Result);
5986     if (IsStrict)
5987       Results.push_back(Chain);
5988     break;
5989   }
5990   case ISD::READCYCLECOUNTER: {
5991     assert(!Subtarget.is64Bit() &&
5992            "READCYCLECOUNTER only has custom type legalization on riscv32");
5993 
5994     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5995     SDValue RCW =
5996         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5997 
5998     Results.push_back(
5999         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6000     Results.push_back(RCW.getValue(2));
6001     break;
6002   }
6003   case ISD::MUL: {
6004     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6005     unsigned XLen = Subtarget.getXLen();
6006     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6007     if (Size > XLen) {
6008       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6009       SDValue LHS = N->getOperand(0);
6010       SDValue RHS = N->getOperand(1);
6011       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6012 
6013       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6014       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6015       // We need exactly one side to be unsigned.
6016       if (LHSIsU == RHSIsU)
6017         return;
6018 
6019       auto MakeMULPair = [&](SDValue S, SDValue U) {
6020         MVT XLenVT = Subtarget.getXLenVT();
6021         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6022         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6023         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6024         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6025         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6026       };
6027 
6028       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6029       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6030 
6031       // The other operand should be signed, but still prefer MULH when
6032       // possible.
6033       if (RHSIsU && LHSIsS && !RHSIsS)
6034         Results.push_back(MakeMULPair(LHS, RHS));
6035       else if (LHSIsU && RHSIsS && !LHSIsS)
6036         Results.push_back(MakeMULPair(RHS, LHS));
6037 
6038       return;
6039     }
6040     LLVM_FALLTHROUGH;
6041   }
6042   case ISD::ADD:
6043   case ISD::SUB:
6044     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6045            "Unexpected custom legalisation");
6046     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6047     break;
6048   case ISD::SHL:
6049   case ISD::SRA:
6050   case ISD::SRL:
6051     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6052            "Unexpected custom legalisation");
6053     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6054       Results.push_back(customLegalizeToWOp(N, DAG));
6055       break;
6056     }
6057 
6058     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6059     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6060     // shift amount.
6061     if (N->getOpcode() == ISD::SHL) {
6062       SDLoc DL(N);
6063       SDValue NewOp0 =
6064           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6065       SDValue NewOp1 =
6066           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6067       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6068       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6069                                    DAG.getValueType(MVT::i32));
6070       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6071     }
6072 
6073     break;
6074   case ISD::ROTL:
6075   case ISD::ROTR:
6076     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6077            "Unexpected custom legalisation");
6078     Results.push_back(customLegalizeToWOp(N, DAG));
6079     break;
6080   case ISD::CTTZ:
6081   case ISD::CTTZ_ZERO_UNDEF:
6082   case ISD::CTLZ:
6083   case ISD::CTLZ_ZERO_UNDEF: {
6084     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6085            "Unexpected custom legalisation");
6086 
6087     SDValue NewOp0 =
6088         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6089     bool IsCTZ =
6090         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6091     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6092     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6093     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6094     return;
6095   }
6096   case ISD::SDIV:
6097   case ISD::UDIV:
6098   case ISD::UREM: {
6099     MVT VT = N->getSimpleValueType(0);
6100     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6101            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6102            "Unexpected custom legalisation");
6103     // Don't promote division/remainder by constant since we should expand those
6104     // to multiply by magic constant.
6105     // FIXME: What if the expansion is disabled for minsize.
6106     if (N->getOperand(1).getOpcode() == ISD::Constant)
6107       return;
6108 
6109     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6110     // the upper 32 bits. For other types we need to sign or zero extend
6111     // based on the opcode.
6112     unsigned ExtOpc = ISD::ANY_EXTEND;
6113     if (VT != MVT::i32)
6114       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6115                                            : ISD::ZERO_EXTEND;
6116 
6117     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6118     break;
6119   }
6120   case ISD::UADDO:
6121   case ISD::USUBO: {
6122     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6123            "Unexpected custom legalisation");
6124     bool IsAdd = N->getOpcode() == ISD::UADDO;
6125     // Create an ADDW or SUBW.
6126     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6127     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6128     SDValue Res =
6129         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6130     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6131                       DAG.getValueType(MVT::i32));
6132 
6133     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6134     // Since the inputs are sign extended from i32, this is equivalent to
6135     // comparing the lower 32 bits.
6136     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6137     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6138                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6139 
6140     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6141     Results.push_back(Overflow);
6142     return;
6143   }
6144   case ISD::UADDSAT:
6145   case ISD::USUBSAT: {
6146     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6147            "Unexpected custom legalisation");
6148     if (Subtarget.hasStdExtZbb()) {
6149       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6150       // sign extend allows overflow of the lower 32 bits to be detected on
6151       // the promoted size.
6152       SDValue LHS =
6153           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6154       SDValue RHS =
6155           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6156       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6157       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6158       return;
6159     }
6160 
6161     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6162     // promotion for UADDO/USUBO.
6163     Results.push_back(expandAddSubSat(N, DAG));
6164     return;
6165   }
6166   case ISD::BITCAST: {
6167     EVT VT = N->getValueType(0);
6168     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6169     SDValue Op0 = N->getOperand(0);
6170     EVT Op0VT = Op0.getValueType();
6171     MVT XLenVT = Subtarget.getXLenVT();
6172     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6173       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6174       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6175     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6176                Subtarget.hasStdExtF()) {
6177       SDValue FPConv =
6178           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6179       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6180     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6181                isTypeLegal(Op0VT)) {
6182       // Custom-legalize bitcasts from fixed-length vector types to illegal
6183       // scalar types in order to improve codegen. Bitcast the vector to a
6184       // one-element vector type whose element type is the same as the result
6185       // type, and extract the first element.
6186       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6187       if (isTypeLegal(BVT)) {
6188         SDValue BVec = DAG.getBitcast(BVT, Op0);
6189         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6190                                       DAG.getConstant(0, DL, XLenVT)));
6191       }
6192     }
6193     break;
6194   }
6195   case RISCVISD::GREV:
6196   case RISCVISD::GORC: {
6197     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6198            "Unexpected custom legalisation");
6199     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6200     // This is similar to customLegalizeToWOp, except that we pass the second
6201     // operand (a TargetConstant) straight through: it is already of type
6202     // XLenVT.
6203     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6204     SDValue NewOp0 =
6205         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6206     SDValue NewOp1 =
6207         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6208     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6209     // ReplaceNodeResults requires we maintain the same type for the return
6210     // value.
6211     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6212     break;
6213   }
6214   case RISCVISD::SHFL: {
6215     // There is no SHFLIW instruction, but we can just promote the operation.
6216     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6217            "Unexpected custom legalisation");
6218     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6219     SDValue NewOp0 =
6220         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6221     SDValue NewOp1 =
6222         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6223     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6224     // ReplaceNodeResults requires we maintain the same type for the return
6225     // value.
6226     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6227     break;
6228   }
6229   case ISD::BSWAP:
6230   case ISD::BITREVERSE: {
6231     MVT VT = N->getSimpleValueType(0);
6232     MVT XLenVT = Subtarget.getXLenVT();
6233     assert((VT == MVT::i8 || VT == MVT::i16 ||
6234             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6235            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6236     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6237     unsigned Imm = VT.getSizeInBits() - 1;
6238     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6239     if (N->getOpcode() == ISD::BSWAP)
6240       Imm &= ~0x7U;
6241     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6242     SDValue GREVI =
6243         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6244     // ReplaceNodeResults requires we maintain the same type for the return
6245     // value.
6246     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6247     break;
6248   }
6249   case ISD::FSHL:
6250   case ISD::FSHR: {
6251     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6252            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6253     SDValue NewOp0 =
6254         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6255     SDValue NewOp1 =
6256         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6257     SDValue NewShAmt =
6258         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6259     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6260     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6261     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6262                            DAG.getConstant(0x1f, DL, MVT::i64));
6263     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6264     // instruction use different orders. fshl will return its first operand for
6265     // shift of zero, fshr will return its second operand. fsl and fsr both
6266     // return rs1 so the ISD nodes need to have different operand orders.
6267     // Shift amount is in rs2.
6268     unsigned Opc = RISCVISD::FSLW;
6269     if (N->getOpcode() == ISD::FSHR) {
6270       std::swap(NewOp0, NewOp1);
6271       Opc = RISCVISD::FSRW;
6272     }
6273     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6274     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6275     break;
6276   }
6277   case ISD::EXTRACT_VECTOR_ELT: {
6278     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6279     // type is illegal (currently only vXi64 RV32).
6280     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6281     // transferred to the destination register. We issue two of these from the
6282     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6283     // first element.
6284     SDValue Vec = N->getOperand(0);
6285     SDValue Idx = N->getOperand(1);
6286 
6287     // The vector type hasn't been legalized yet so we can't issue target
6288     // specific nodes if it needs legalization.
6289     // FIXME: We would manually legalize if it's important.
6290     if (!isTypeLegal(Vec.getValueType()))
6291       return;
6292 
6293     MVT VecVT = Vec.getSimpleValueType();
6294 
6295     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6296            VecVT.getVectorElementType() == MVT::i64 &&
6297            "Unexpected EXTRACT_VECTOR_ELT legalization");
6298 
6299     // If this is a fixed vector, we need to convert it to a scalable vector.
6300     MVT ContainerVT = VecVT;
6301     if (VecVT.isFixedLengthVector()) {
6302       ContainerVT = getContainerForFixedLengthVector(VecVT);
6303       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6304     }
6305 
6306     MVT XLenVT = Subtarget.getXLenVT();
6307 
6308     // Use a VL of 1 to avoid processing more elements than we need.
6309     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6310     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6311     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6312 
6313     // Unless the index is known to be 0, we must slide the vector down to get
6314     // the desired element into index 0.
6315     if (!isNullConstant(Idx)) {
6316       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6317                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6318     }
6319 
6320     // Extract the lower XLEN bits of the correct vector element.
6321     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6322 
6323     // To extract the upper XLEN bits of the vector element, shift the first
6324     // element right by 32 bits and re-extract the lower XLEN bits.
6325     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6326                                      DAG.getConstant(32, DL, XLenVT), VL);
6327     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6328                                  ThirtyTwoV, Mask, VL);
6329 
6330     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6331 
6332     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6333     break;
6334   }
6335   case ISD::INTRINSIC_WO_CHAIN: {
6336     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6337     switch (IntNo) {
6338     default:
6339       llvm_unreachable(
6340           "Don't know how to custom type legalize this intrinsic!");
6341     case Intrinsic::riscv_grev:
6342     case Intrinsic::riscv_gorc:
6343     case Intrinsic::riscv_bcompress:
6344     case Intrinsic::riscv_bdecompress:
6345     case Intrinsic::riscv_bfp: {
6346       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6347              "Unexpected custom legalisation");
6348       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6349       break;
6350     }
6351     case Intrinsic::riscv_fsl:
6352     case Intrinsic::riscv_fsr: {
6353       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6354              "Unexpected custom legalisation");
6355       SDValue NewOp1 =
6356           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6357       SDValue NewOp2 =
6358           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6359       SDValue NewOp3 =
6360           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6361       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6362       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6363       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6364       break;
6365     }
6366     case Intrinsic::riscv_orc_b: {
6367       // Lower to the GORCI encoding for orc.b with the operand extended.
6368       SDValue NewOp =
6369           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6370       // If Zbp is enabled, use GORCIW which will sign extend the result.
6371       unsigned Opc =
6372           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6373       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6374                                 DAG.getConstant(7, DL, MVT::i64));
6375       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6376       return;
6377     }
6378     case Intrinsic::riscv_shfl:
6379     case Intrinsic::riscv_unshfl: {
6380       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6381              "Unexpected custom legalisation");
6382       SDValue NewOp1 =
6383           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6384       SDValue NewOp2 =
6385           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6386       unsigned Opc =
6387           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6388       if (isa<ConstantSDNode>(N->getOperand(2))) {
6389         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6390                              DAG.getConstant(0xf, DL, MVT::i64));
6391         Opc =
6392             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6393       }
6394       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6395       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6396       break;
6397     }
6398     case Intrinsic::riscv_vmv_x_s: {
6399       EVT VT = N->getValueType(0);
6400       MVT XLenVT = Subtarget.getXLenVT();
6401       if (VT.bitsLT(XLenVT)) {
6402         // Simple case just extract using vmv.x.s and truncate.
6403         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6404                                       Subtarget.getXLenVT(), N->getOperand(1));
6405         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6406         return;
6407       }
6408 
6409       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6410              "Unexpected custom legalization");
6411 
6412       // We need to do the move in two steps.
6413       SDValue Vec = N->getOperand(1);
6414       MVT VecVT = Vec.getSimpleValueType();
6415 
6416       // First extract the lower XLEN bits of the element.
6417       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6418 
6419       // To extract the upper XLEN bits of the vector element, shift the first
6420       // element right by 32 bits and re-extract the lower XLEN bits.
6421       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6422       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6423       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6424       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6425                                        DAG.getConstant(32, DL, XLenVT), VL);
6426       SDValue LShr32 =
6427           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6428       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6429 
6430       Results.push_back(
6431           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6432       break;
6433     }
6434     }
6435     break;
6436   }
6437   case ISD::VECREDUCE_ADD:
6438   case ISD::VECREDUCE_AND:
6439   case ISD::VECREDUCE_OR:
6440   case ISD::VECREDUCE_XOR:
6441   case ISD::VECREDUCE_SMAX:
6442   case ISD::VECREDUCE_UMAX:
6443   case ISD::VECREDUCE_SMIN:
6444   case ISD::VECREDUCE_UMIN:
6445     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6446       Results.push_back(V);
6447     break;
6448   case ISD::VP_REDUCE_ADD:
6449   case ISD::VP_REDUCE_AND:
6450   case ISD::VP_REDUCE_OR:
6451   case ISD::VP_REDUCE_XOR:
6452   case ISD::VP_REDUCE_SMAX:
6453   case ISD::VP_REDUCE_UMAX:
6454   case ISD::VP_REDUCE_SMIN:
6455   case ISD::VP_REDUCE_UMIN:
6456     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6457       Results.push_back(V);
6458     break;
6459   case ISD::FLT_ROUNDS_: {
6460     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6461     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6462     Results.push_back(Res.getValue(0));
6463     Results.push_back(Res.getValue(1));
6464     break;
6465   }
6466   }
6467 }
6468 
6469 // A structure to hold one of the bit-manipulation patterns below. Together, a
6470 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6471 //   (or (and (shl x, 1), 0xAAAAAAAA),
6472 //       (and (srl x, 1), 0x55555555))
6473 struct RISCVBitmanipPat {
6474   SDValue Op;
6475   unsigned ShAmt;
6476   bool IsSHL;
6477 
6478   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6479     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6480   }
6481 };
6482 
6483 // Matches patterns of the form
6484 //   (and (shl x, C2), (C1 << C2))
6485 //   (and (srl x, C2), C1)
6486 //   (shl (and x, C1), C2)
6487 //   (srl (and x, (C1 << C2)), C2)
6488 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6489 // The expected masks for each shift amount are specified in BitmanipMasks where
6490 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6491 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6492 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6493 // XLen is 64.
6494 static Optional<RISCVBitmanipPat>
6495 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6496   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6497          "Unexpected number of masks");
6498   Optional<uint64_t> Mask;
6499   // Optionally consume a mask around the shift operation.
6500   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6501     Mask = Op.getConstantOperandVal(1);
6502     Op = Op.getOperand(0);
6503   }
6504   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6505     return None;
6506   bool IsSHL = Op.getOpcode() == ISD::SHL;
6507 
6508   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6509     return None;
6510   uint64_t ShAmt = Op.getConstantOperandVal(1);
6511 
6512   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6513   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6514     return None;
6515   // If we don't have enough masks for 64 bit, then we must be trying to
6516   // match SHFL so we're only allowed to shift 1/4 of the width.
6517   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6518     return None;
6519 
6520   SDValue Src = Op.getOperand(0);
6521 
6522   // The expected mask is shifted left when the AND is found around SHL
6523   // patterns.
6524   //   ((x >> 1) & 0x55555555)
6525   //   ((x << 1) & 0xAAAAAAAA)
6526   bool SHLExpMask = IsSHL;
6527 
6528   if (!Mask) {
6529     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6530     // the mask is all ones: consume that now.
6531     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6532       Mask = Src.getConstantOperandVal(1);
6533       Src = Src.getOperand(0);
6534       // The expected mask is now in fact shifted left for SRL, so reverse the
6535       // decision.
6536       //   ((x & 0xAAAAAAAA) >> 1)
6537       //   ((x & 0x55555555) << 1)
6538       SHLExpMask = !SHLExpMask;
6539     } else {
6540       // Use a default shifted mask of all-ones if there's no AND, truncated
6541       // down to the expected width. This simplifies the logic later on.
6542       Mask = maskTrailingOnes<uint64_t>(Width);
6543       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6544     }
6545   }
6546 
6547   unsigned MaskIdx = Log2_32(ShAmt);
6548   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6549 
6550   if (SHLExpMask)
6551     ExpMask <<= ShAmt;
6552 
6553   if (Mask != ExpMask)
6554     return None;
6555 
6556   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6557 }
6558 
6559 // Matches any of the following bit-manipulation patterns:
6560 //   (and (shl x, 1), (0x55555555 << 1))
6561 //   (and (srl x, 1), 0x55555555)
6562 //   (shl (and x, 0x55555555), 1)
6563 //   (srl (and x, (0x55555555 << 1)), 1)
6564 // where the shift amount and mask may vary thus:
6565 //   [1]  = 0x55555555 / 0xAAAAAAAA
6566 //   [2]  = 0x33333333 / 0xCCCCCCCC
6567 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6568 //   [8]  = 0x00FF00FF / 0xFF00FF00
6569 //   [16] = 0x0000FFFF / 0xFFFFFFFF
6570 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6571 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6572   // These are the unshifted masks which we use to match bit-manipulation
6573   // patterns. They may be shifted left in certain circumstances.
6574   static const uint64_t BitmanipMasks[] = {
6575       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6576       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6577 
6578   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6579 }
6580 
6581 // Match the following pattern as a GREVI(W) operation
6582 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6583 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6584                                const RISCVSubtarget &Subtarget) {
6585   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6586   EVT VT = Op.getValueType();
6587 
6588   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6589     auto LHS = matchGREVIPat(Op.getOperand(0));
6590     auto RHS = matchGREVIPat(Op.getOperand(1));
6591     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6592       SDLoc DL(Op);
6593       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6594                          DAG.getConstant(LHS->ShAmt, DL, VT));
6595     }
6596   }
6597   return SDValue();
6598 }
6599 
6600 // Matches any the following pattern as a GORCI(W) operation
6601 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6602 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6603 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6604 // Note that with the variant of 3.,
6605 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6606 // the inner pattern will first be matched as GREVI and then the outer
6607 // pattern will be matched to GORC via the first rule above.
6608 // 4.  (or (rotl/rotr x, bitwidth/2), x)
6609 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6610                                const RISCVSubtarget &Subtarget) {
6611   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6612   EVT VT = Op.getValueType();
6613 
6614   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6615     SDLoc DL(Op);
6616     SDValue Op0 = Op.getOperand(0);
6617     SDValue Op1 = Op.getOperand(1);
6618 
6619     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6620       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6621           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6622           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6623         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6624       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
6625       if ((Reverse.getOpcode() == ISD::ROTL ||
6626            Reverse.getOpcode() == ISD::ROTR) &&
6627           Reverse.getOperand(0) == X &&
6628           isa<ConstantSDNode>(Reverse.getOperand(1))) {
6629         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
6630         if (RotAmt == (VT.getSizeInBits() / 2))
6631           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
6632                              DAG.getConstant(RotAmt, DL, VT));
6633       }
6634       return SDValue();
6635     };
6636 
6637     // Check for either commutable permutation of (or (GREVI x, shamt), x)
6638     if (SDValue V = MatchOROfReverse(Op0, Op1))
6639       return V;
6640     if (SDValue V = MatchOROfReverse(Op1, Op0))
6641       return V;
6642 
6643     // OR is commutable so canonicalize its OR operand to the left
6644     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
6645       std::swap(Op0, Op1);
6646     if (Op0.getOpcode() != ISD::OR)
6647       return SDValue();
6648     SDValue OrOp0 = Op0.getOperand(0);
6649     SDValue OrOp1 = Op0.getOperand(1);
6650     auto LHS = matchGREVIPat(OrOp0);
6651     // OR is commutable so swap the operands and try again: x might have been
6652     // on the left
6653     if (!LHS) {
6654       std::swap(OrOp0, OrOp1);
6655       LHS = matchGREVIPat(OrOp0);
6656     }
6657     auto RHS = matchGREVIPat(Op1);
6658     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
6659       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
6660                          DAG.getConstant(LHS->ShAmt, DL, VT));
6661     }
6662   }
6663   return SDValue();
6664 }
6665 
6666 // Matches any of the following bit-manipulation patterns:
6667 //   (and (shl x, 1), (0x22222222 << 1))
6668 //   (and (srl x, 1), 0x22222222)
6669 //   (shl (and x, 0x22222222), 1)
6670 //   (srl (and x, (0x22222222 << 1)), 1)
6671 // where the shift amount and mask may vary thus:
6672 //   [1]  = 0x22222222 / 0x44444444
6673 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
6674 //   [4]  = 0x00F000F0 / 0x0F000F00
6675 //   [8]  = 0x0000FF00 / 0x00FF0000
6676 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
6677 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
6678   // These are the unshifted masks which we use to match bit-manipulation
6679   // patterns. They may be shifted left in certain circumstances.
6680   static const uint64_t BitmanipMasks[] = {
6681       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
6682       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
6683 
6684   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6685 }
6686 
6687 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
6688 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6689                                const RISCVSubtarget &Subtarget) {
6690   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6691   EVT VT = Op.getValueType();
6692 
6693   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6694     return SDValue();
6695 
6696   SDValue Op0 = Op.getOperand(0);
6697   SDValue Op1 = Op.getOperand(1);
6698 
6699   // Or is commutable so canonicalize the second OR to the LHS.
6700   if (Op0.getOpcode() != ISD::OR)
6701     std::swap(Op0, Op1);
6702   if (Op0.getOpcode() != ISD::OR)
6703     return SDValue();
6704 
6705   // We found an inner OR, so our operands are the operands of the inner OR
6706   // and the other operand of the outer OR.
6707   SDValue A = Op0.getOperand(0);
6708   SDValue B = Op0.getOperand(1);
6709   SDValue C = Op1;
6710 
6711   auto Match1 = matchSHFLPat(A);
6712   auto Match2 = matchSHFLPat(B);
6713 
6714   // If neither matched, we failed.
6715   if (!Match1 && !Match2)
6716     return SDValue();
6717 
6718   // We had at least one match. if one failed, try the remaining C operand.
6719   if (!Match1) {
6720     std::swap(A, C);
6721     Match1 = matchSHFLPat(A);
6722     if (!Match1)
6723       return SDValue();
6724   } else if (!Match2) {
6725     std::swap(B, C);
6726     Match2 = matchSHFLPat(B);
6727     if (!Match2)
6728       return SDValue();
6729   }
6730   assert(Match1 && Match2);
6731 
6732   // Make sure our matches pair up.
6733   if (!Match1->formsPairWith(*Match2))
6734     return SDValue();
6735 
6736   // All the remains is to make sure C is an AND with the same input, that masks
6737   // out the bits that are being shuffled.
6738   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6739       C.getOperand(0) != Match1->Op)
6740     return SDValue();
6741 
6742   uint64_t Mask = C.getConstantOperandVal(1);
6743 
6744   static const uint64_t BitmanipMasks[] = {
6745       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6746       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6747   };
6748 
6749   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6750   unsigned MaskIdx = Log2_32(Match1->ShAmt);
6751   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6752 
6753   if (Mask != ExpMask)
6754     return SDValue();
6755 
6756   SDLoc DL(Op);
6757   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
6758                      DAG.getConstant(Match1->ShAmt, DL, VT));
6759 }
6760 
6761 // Optimize (add (shl x, c0), (shl y, c1)) ->
6762 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
6763 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
6764                                   const RISCVSubtarget &Subtarget) {
6765   // Perform this optimization only in the zba extension.
6766   if (!Subtarget.hasStdExtZba())
6767     return SDValue();
6768 
6769   // Skip for vector types and larger types.
6770   EVT VT = N->getValueType(0);
6771   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6772     return SDValue();
6773 
6774   // The two operand nodes must be SHL and have no other use.
6775   SDValue N0 = N->getOperand(0);
6776   SDValue N1 = N->getOperand(1);
6777   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
6778       !N0->hasOneUse() || !N1->hasOneUse())
6779     return SDValue();
6780 
6781   // Check c0 and c1.
6782   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6783   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
6784   if (!N0C || !N1C)
6785     return SDValue();
6786   int64_t C0 = N0C->getSExtValue();
6787   int64_t C1 = N1C->getSExtValue();
6788   if (C0 <= 0 || C1 <= 0)
6789     return SDValue();
6790 
6791   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
6792   int64_t Bits = std::min(C0, C1);
6793   int64_t Diff = std::abs(C0 - C1);
6794   if (Diff != 1 && Diff != 2 && Diff != 3)
6795     return SDValue();
6796 
6797   // Build nodes.
6798   SDLoc DL(N);
6799   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
6800   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
6801   SDValue NA0 =
6802       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
6803   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
6804   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
6805 }
6806 
6807 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
6808 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
6809 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
6810 // not undo itself, but they are redundant.
6811 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
6812   SDValue Src = N->getOperand(0);
6813 
6814   if (Src.getOpcode() != N->getOpcode())
6815     return SDValue();
6816 
6817   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
6818       !isa<ConstantSDNode>(Src.getOperand(1)))
6819     return SDValue();
6820 
6821   unsigned ShAmt1 = N->getConstantOperandVal(1);
6822   unsigned ShAmt2 = Src.getConstantOperandVal(1);
6823   Src = Src.getOperand(0);
6824 
6825   unsigned CombinedShAmt;
6826   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
6827     CombinedShAmt = ShAmt1 | ShAmt2;
6828   else
6829     CombinedShAmt = ShAmt1 ^ ShAmt2;
6830 
6831   if (CombinedShAmt == 0)
6832     return Src;
6833 
6834   SDLoc DL(N);
6835   return DAG.getNode(
6836       N->getOpcode(), DL, N->getValueType(0), Src,
6837       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
6838 }
6839 
6840 // Combine a constant select operand into its use:
6841 //
6842 // (and (select cond, -1, c), x)
6843 //   -> (select cond, x, (and x, c))  [AllOnes=1]
6844 // (or  (select cond, 0, c), x)
6845 //   -> (select cond, x, (or x, c))  [AllOnes=0]
6846 // (xor (select cond, 0, c), x)
6847 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
6848 // (add (select cond, 0, c), x)
6849 //   -> (select cond, x, (add x, c))  [AllOnes=0]
6850 // (sub x, (select cond, 0, c))
6851 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
6852 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6853                                    SelectionDAG &DAG, bool AllOnes) {
6854   EVT VT = N->getValueType(0);
6855 
6856   // Skip vectors.
6857   if (VT.isVector())
6858     return SDValue();
6859 
6860   if ((Slct.getOpcode() != ISD::SELECT &&
6861        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
6862       !Slct.hasOneUse())
6863     return SDValue();
6864 
6865   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
6866     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
6867   };
6868 
6869   bool SwapSelectOps;
6870   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
6871   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
6872   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
6873   SDValue NonConstantVal;
6874   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
6875     SwapSelectOps = false;
6876     NonConstantVal = FalseVal;
6877   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
6878     SwapSelectOps = true;
6879     NonConstantVal = TrueVal;
6880   } else
6881     return SDValue();
6882 
6883   // Slct is now know to be the desired identity constant when CC is true.
6884   TrueVal = OtherOp;
6885   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
6886   // Unless SwapSelectOps says the condition should be false.
6887   if (SwapSelectOps)
6888     std::swap(TrueVal, FalseVal);
6889 
6890   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
6891     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
6892                        {Slct.getOperand(0), Slct.getOperand(1),
6893                         Slct.getOperand(2), TrueVal, FalseVal});
6894 
6895   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
6896                      {Slct.getOperand(0), TrueVal, FalseVal});
6897 }
6898 
6899 // Attempt combineSelectAndUse on each operand of a commutative operator N.
6900 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
6901                                               bool AllOnes) {
6902   SDValue N0 = N->getOperand(0);
6903   SDValue N1 = N->getOperand(1);
6904   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
6905     return Result;
6906   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
6907     return Result;
6908   return SDValue();
6909 }
6910 
6911 // Transform (add (mul x, c0), c1) ->
6912 //           (add (mul (add x, c1/c0), c0), c1%c0).
6913 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
6914 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
6915 // to an infinite loop in DAGCombine if transformed.
6916 // Or transform (add (mul x, c0), c1) ->
6917 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
6918 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
6919 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
6920 // lead to an infinite loop in DAGCombine if transformed.
6921 // Or transform (add (mul x, c0), c1) ->
6922 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
6923 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
6924 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
6925 // lead to an infinite loop in DAGCombine if transformed.
6926 // Or transform (add (mul x, c0), c1) ->
6927 //              (mul (add x, c1/c0), c0).
6928 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
6929 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
6930                                      const RISCVSubtarget &Subtarget) {
6931   // Skip for vector types and larger types.
6932   EVT VT = N->getValueType(0);
6933   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6934     return SDValue();
6935   // The first operand node must be a MUL and has no other use.
6936   SDValue N0 = N->getOperand(0);
6937   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
6938     return SDValue();
6939   // Check if c0 and c1 match above conditions.
6940   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6941   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6942   if (!N0C || !N1C)
6943     return SDValue();
6944   int64_t C0 = N0C->getSExtValue();
6945   int64_t C1 = N1C->getSExtValue();
6946   int64_t CA, CB;
6947   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
6948     return SDValue();
6949   // Search for proper CA (non-zero) and CB that both are simm12.
6950   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
6951       !isInt<12>(C0 * (C1 / C0))) {
6952     CA = C1 / C0;
6953     CB = C1 % C0;
6954   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
6955              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
6956     CA = C1 / C0 + 1;
6957     CB = C1 % C0 - C0;
6958   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
6959              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
6960     CA = C1 / C0 - 1;
6961     CB = C1 % C0 + C0;
6962   } else
6963     return SDValue();
6964   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
6965   SDLoc DL(N);
6966   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
6967                              DAG.getConstant(CA, DL, VT));
6968   SDValue New1 =
6969       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
6970   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
6971 }
6972 
6973 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
6974                                  const RISCVSubtarget &Subtarget) {
6975   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
6976     return V;
6977   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
6978     return V;
6979   // fold (add (select lhs, rhs, cc, 0, y), x) ->
6980   //      (select lhs, rhs, cc, x, (add x, y))
6981   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6982 }
6983 
6984 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
6985   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
6986   //      (select lhs, rhs, cc, x, (sub x, y))
6987   SDValue N0 = N->getOperand(0);
6988   SDValue N1 = N->getOperand(1);
6989   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
6990 }
6991 
6992 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
6993   // fold (and (select lhs, rhs, cc, -1, y), x) ->
6994   //      (select lhs, rhs, cc, x, (and x, y))
6995   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
6996 }
6997 
6998 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
6999                                 const RISCVSubtarget &Subtarget) {
7000   if (Subtarget.hasStdExtZbp()) {
7001     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7002       return GREV;
7003     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7004       return GORC;
7005     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7006       return SHFL;
7007   }
7008 
7009   // fold (or (select cond, 0, y), x) ->
7010   //      (select cond, x, (or x, y))
7011   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7012 }
7013 
7014 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7015   // fold (xor (select cond, 0, y), x) ->
7016   //      (select cond, x, (xor x, y))
7017   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7018 }
7019 
7020 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7021 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7022 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7023 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7024 // ADDW/SUBW/MULW.
7025 static SDValue performANY_EXTENDCombine(SDNode *N,
7026                                         TargetLowering::DAGCombinerInfo &DCI,
7027                                         const RISCVSubtarget &Subtarget) {
7028   if (!Subtarget.is64Bit())
7029     return SDValue();
7030 
7031   SelectionDAG &DAG = DCI.DAG;
7032 
7033   SDValue Src = N->getOperand(0);
7034   EVT VT = N->getValueType(0);
7035   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7036     return SDValue();
7037 
7038   // The opcode must be one that can implicitly sign_extend.
7039   // FIXME: Additional opcodes.
7040   switch (Src.getOpcode()) {
7041   default:
7042     return SDValue();
7043   case ISD::MUL:
7044     if (!Subtarget.hasStdExtM())
7045       return SDValue();
7046     LLVM_FALLTHROUGH;
7047   case ISD::ADD:
7048   case ISD::SUB:
7049     break;
7050   }
7051 
7052   // Only handle cases where the result is used by a CopyToReg. That likely
7053   // means the value is a liveout of the basic block. This helps prevent
7054   // infinite combine loops like PR51206.
7055   if (none_of(N->uses(),
7056               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7057     return SDValue();
7058 
7059   SmallVector<SDNode *, 4> SetCCs;
7060   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7061                             UE = Src.getNode()->use_end();
7062        UI != UE; ++UI) {
7063     SDNode *User = *UI;
7064     if (User == N)
7065       continue;
7066     if (UI.getUse().getResNo() != Src.getResNo())
7067       continue;
7068     // All i32 setccs are legalized by sign extending operands.
7069     if (User->getOpcode() == ISD::SETCC) {
7070       SetCCs.push_back(User);
7071       continue;
7072     }
7073     // We don't know if we can extend this user.
7074     break;
7075   }
7076 
7077   // If we don't have any SetCCs, this isn't worthwhile.
7078   if (SetCCs.empty())
7079     return SDValue();
7080 
7081   SDLoc DL(N);
7082   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7083   DCI.CombineTo(N, SExt);
7084 
7085   // Promote all the setccs.
7086   for (SDNode *SetCC : SetCCs) {
7087     SmallVector<SDValue, 4> Ops;
7088 
7089     for (unsigned j = 0; j != 2; ++j) {
7090       SDValue SOp = SetCC->getOperand(j);
7091       if (SOp == Src)
7092         Ops.push_back(SExt);
7093       else
7094         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7095     }
7096 
7097     Ops.push_back(SetCC->getOperand(2));
7098     DCI.CombineTo(SetCC,
7099                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7100   }
7101   return SDValue(N, 0);
7102 }
7103 
7104 // Try to form VWMUL or VWMULU.
7105 // FIXME: Support VWMULSU.
7106 static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
7107                                     SelectionDAG &DAG) {
7108   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7109   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7110   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7111   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7112     return SDValue();
7113 
7114   SDValue Mask = N->getOperand(2);
7115   SDValue VL = N->getOperand(3);
7116 
7117   // Make sure the mask and VL match.
7118   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7119     return SDValue();
7120 
7121   MVT VT = N->getSimpleValueType(0);
7122 
7123   // Determine the narrow size for a widening multiply.
7124   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7125   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7126                                   VT.getVectorElementCount());
7127 
7128   SDLoc DL(N);
7129 
7130   // See if the other operand is the same opcode.
7131   if (Op0.getOpcode() == Op1.getOpcode()) {
7132     if (!Op1.hasOneUse())
7133       return SDValue();
7134 
7135     // Make sure the mask and VL match.
7136     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7137       return SDValue();
7138 
7139     Op1 = Op1.getOperand(0);
7140   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7141     // The operand is a splat of a scalar.
7142 
7143     // The VL must be the same.
7144     if (Op1.getOperand(1) != VL)
7145       return SDValue();
7146 
7147     // Get the scalar value.
7148     Op1 = Op1.getOperand(0);
7149 
7150     // See if have enough sign bits or zero bits in the scalar to use a
7151     // widening multiply by splatting to smaller element size.
7152     unsigned EltBits = VT.getScalarSizeInBits();
7153     unsigned ScalarBits = Op1.getValueSizeInBits();
7154     // Make sure we're getting all element bits from the scalar register.
7155     // FIXME: Support implicit sign extension of vmv.v.x?
7156     if (ScalarBits < EltBits)
7157       return SDValue();
7158 
7159     if (IsSignExt) {
7160       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
7161         return SDValue();
7162     } else {
7163       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7164       if (!DAG.MaskedValueIsZero(Op1, Mask))
7165         return SDValue();
7166     }
7167 
7168     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
7169   } else
7170     return SDValue();
7171 
7172   Op0 = Op0.getOperand(0);
7173 
7174   // Re-introduce narrower extends if needed.
7175   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7176   if (Op0.getValueType() != NarrowVT)
7177     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7178   if (Op1.getValueType() != NarrowVT)
7179     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7180 
7181   unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7182   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7183 }
7184 
7185 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7186   switch (Op.getOpcode()) {
7187   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7188   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7189   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7190   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7191   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7192   }
7193 
7194   return RISCVFPRndMode::Invalid;
7195 }
7196 
7197 // Fold
7198 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7199 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7200 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7201 //   (fp_to_int (fceil X))      -> fcvt X, rup
7202 //   (fp_to_int (fround X))     -> fcvt X, rmm
7203 static SDValue performFP_TO_INTCombine(SDNode *N,
7204                                        TargetLowering::DAGCombinerInfo &DCI,
7205                                        const RISCVSubtarget &Subtarget) {
7206   SelectionDAG &DAG = DCI.DAG;
7207   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7208   MVT XLenVT = Subtarget.getXLenVT();
7209 
7210   // Only handle XLen or i32 types. Other types narrower than XLen will
7211   // eventually be legalized to XLenVT.
7212   EVT VT = N->getValueType(0);
7213   if (VT != MVT::i32 && VT != XLenVT)
7214     return SDValue();
7215 
7216   SDValue Src = N->getOperand(0);
7217 
7218   // Ensure the FP type is also legal.
7219   if (!TLI.isTypeLegal(Src.getValueType()))
7220     return SDValue();
7221 
7222   // Don't do this for f16 with Zfhmin and not Zfh.
7223   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7224     return SDValue();
7225 
7226   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7227   if (FRM == RISCVFPRndMode::Invalid)
7228     return SDValue();
7229 
7230   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7231 
7232   unsigned Opc;
7233   if (VT == XLenVT)
7234     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7235   else
7236     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7237 
7238   SDLoc DL(N);
7239   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7240                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7241   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7242 }
7243 
7244 // Fold
7245 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7246 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7247 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7248 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7249 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7250 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7251                                        TargetLowering::DAGCombinerInfo &DCI,
7252                                        const RISCVSubtarget &Subtarget) {
7253   SelectionDAG &DAG = DCI.DAG;
7254   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7255   MVT XLenVT = Subtarget.getXLenVT();
7256 
7257   // Only handle XLen types. Other types narrower than XLen will eventually be
7258   // legalized to XLenVT.
7259   EVT DstVT = N->getValueType(0);
7260   if (DstVT != XLenVT)
7261     return SDValue();
7262 
7263   SDValue Src = N->getOperand(0);
7264 
7265   // Ensure the FP type is also legal.
7266   if (!TLI.isTypeLegal(Src.getValueType()))
7267     return SDValue();
7268 
7269   // Don't do this for f16 with Zfhmin and not Zfh.
7270   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7271     return SDValue();
7272 
7273   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7274 
7275   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7276   if (FRM == RISCVFPRndMode::Invalid)
7277     return SDValue();
7278 
7279   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7280 
7281   unsigned Opc;
7282   if (SatVT == DstVT)
7283     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7284   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7285     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7286   else
7287     return SDValue();
7288   // FIXME: Support other SatVTs by clamping before or after the conversion.
7289 
7290   Src = Src.getOperand(0);
7291 
7292   SDLoc DL(N);
7293   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7294                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7295 
7296   // RISCV FP-to-int conversions saturate to the destination register size, but
7297   // don't produce 0 for nan.
7298   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7299   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7300 }
7301 
7302 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7303                                                DAGCombinerInfo &DCI) const {
7304   SelectionDAG &DAG = DCI.DAG;
7305 
7306   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7307   // bits are demanded. N will be added to the Worklist if it was not deleted.
7308   // Caller should return SDValue(N, 0) if this returns true.
7309   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7310     SDValue Op = N->getOperand(OpNo);
7311     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7312     if (!SimplifyDemandedBits(Op, Mask, DCI))
7313       return false;
7314 
7315     if (N->getOpcode() != ISD::DELETED_NODE)
7316       DCI.AddToWorklist(N);
7317     return true;
7318   };
7319 
7320   switch (N->getOpcode()) {
7321   default:
7322     break;
7323   case RISCVISD::SplitF64: {
7324     SDValue Op0 = N->getOperand(0);
7325     // If the input to SplitF64 is just BuildPairF64 then the operation is
7326     // redundant. Instead, use BuildPairF64's operands directly.
7327     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7328       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7329 
7330     SDLoc DL(N);
7331 
7332     // It's cheaper to materialise two 32-bit integers than to load a double
7333     // from the constant pool and transfer it to integer registers through the
7334     // stack.
7335     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7336       APInt V = C->getValueAPF().bitcastToAPInt();
7337       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7338       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7339       return DCI.CombineTo(N, Lo, Hi);
7340     }
7341 
7342     // This is a target-specific version of a DAGCombine performed in
7343     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7344     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7345     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7346     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7347         !Op0.getNode()->hasOneUse())
7348       break;
7349     SDValue NewSplitF64 =
7350         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7351                     Op0.getOperand(0));
7352     SDValue Lo = NewSplitF64.getValue(0);
7353     SDValue Hi = NewSplitF64.getValue(1);
7354     APInt SignBit = APInt::getSignMask(32);
7355     if (Op0.getOpcode() == ISD::FNEG) {
7356       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
7357                                   DAG.getConstant(SignBit, DL, MVT::i32));
7358       return DCI.CombineTo(N, Lo, NewHi);
7359     }
7360     assert(Op0.getOpcode() == ISD::FABS);
7361     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
7362                                 DAG.getConstant(~SignBit, DL, MVT::i32));
7363     return DCI.CombineTo(N, Lo, NewHi);
7364   }
7365   case RISCVISD::SLLW:
7366   case RISCVISD::SRAW:
7367   case RISCVISD::SRLW:
7368   case RISCVISD::ROLW:
7369   case RISCVISD::RORW: {
7370     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7371     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7372         SimplifyDemandedLowBitsHelper(1, 5))
7373       return SDValue(N, 0);
7374     break;
7375   }
7376   case RISCVISD::CLZW:
7377   case RISCVISD::CTZW: {
7378     // Only the lower 32 bits of the first operand are read
7379     if (SimplifyDemandedLowBitsHelper(0, 32))
7380       return SDValue(N, 0);
7381     break;
7382   }
7383   case RISCVISD::GREV:
7384   case RISCVISD::GORC: {
7385     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
7386     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7387     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7388     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
7389       return SDValue(N, 0);
7390 
7391     return combineGREVI_GORCI(N, DAG);
7392   }
7393   case RISCVISD::GREVW:
7394   case RISCVISD::GORCW: {
7395     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7396     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7397         SimplifyDemandedLowBitsHelper(1, 5))
7398       return SDValue(N, 0);
7399 
7400     return combineGREVI_GORCI(N, DAG);
7401   }
7402   case RISCVISD::SHFL:
7403   case RISCVISD::UNSHFL: {
7404     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
7405     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7406     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7407     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
7408       return SDValue(N, 0);
7409 
7410     break;
7411   }
7412   case RISCVISD::SHFLW:
7413   case RISCVISD::UNSHFLW: {
7414     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
7415     SDValue LHS = N->getOperand(0);
7416     SDValue RHS = N->getOperand(1);
7417     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
7418     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
7419     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7420         SimplifyDemandedLowBitsHelper(1, 4))
7421       return SDValue(N, 0);
7422 
7423     break;
7424   }
7425   case RISCVISD::BCOMPRESSW:
7426   case RISCVISD::BDECOMPRESSW: {
7427     // Only the lower 32 bits of LHS and RHS are read.
7428     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7429         SimplifyDemandedLowBitsHelper(1, 32))
7430       return SDValue(N, 0);
7431 
7432     break;
7433   }
7434   case RISCVISD::FMV_X_ANYEXTH:
7435   case RISCVISD::FMV_X_ANYEXTW_RV64: {
7436     SDLoc DL(N);
7437     SDValue Op0 = N->getOperand(0);
7438     MVT VT = N->getSimpleValueType(0);
7439     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7440     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7441     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7442     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7443          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7444         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7445          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7446       assert(Op0.getOperand(0).getValueType() == VT &&
7447              "Unexpected value type!");
7448       return Op0.getOperand(0);
7449     }
7450 
7451     // This is a target-specific version of a DAGCombine performed in
7452     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7453     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7454     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7455     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7456         !Op0.getNode()->hasOneUse())
7457       break;
7458     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7459     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7460     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7461     if (Op0.getOpcode() == ISD::FNEG)
7462       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7463                          DAG.getConstant(SignBit, DL, VT));
7464 
7465     assert(Op0.getOpcode() == ISD::FABS);
7466     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7467                        DAG.getConstant(~SignBit, DL, VT));
7468   }
7469   case ISD::ADD:
7470     return performADDCombine(N, DAG, Subtarget);
7471   case ISD::SUB:
7472     return performSUBCombine(N, DAG);
7473   case ISD::AND:
7474     return performANDCombine(N, DAG);
7475   case ISD::OR:
7476     return performORCombine(N, DAG, Subtarget);
7477   case ISD::XOR:
7478     return performXORCombine(N, DAG);
7479   case ISD::ANY_EXTEND:
7480     return performANY_EXTENDCombine(N, DCI, Subtarget);
7481   case ISD::ZERO_EXTEND:
7482     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7483     // type legalization. This is safe because fp_to_uint produces poison if
7484     // it overflows.
7485     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
7486       SDValue Src = N->getOperand(0);
7487       if (Src.getOpcode() == ISD::FP_TO_UINT &&
7488           isTypeLegal(Src.getOperand(0).getValueType()))
7489         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
7490                            Src.getOperand(0));
7491       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
7492           isTypeLegal(Src.getOperand(1).getValueType())) {
7493         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
7494         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
7495                                   Src.getOperand(0), Src.getOperand(1));
7496         DCI.CombineTo(N, Res);
7497         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
7498         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
7499         return SDValue(N, 0); // Return N so it doesn't get rechecked.
7500       }
7501     }
7502     return SDValue();
7503   case RISCVISD::SELECT_CC: {
7504     // Transform
7505     SDValue LHS = N->getOperand(0);
7506     SDValue RHS = N->getOperand(1);
7507     SDValue TrueV = N->getOperand(3);
7508     SDValue FalseV = N->getOperand(4);
7509 
7510     // If the True and False values are the same, we don't need a select_cc.
7511     if (TrueV == FalseV)
7512       return TrueV;
7513 
7514     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
7515     if (!ISD::isIntEqualitySetCC(CCVal))
7516       break;
7517 
7518     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
7519     //      (select_cc X, Y, lt, trueV, falseV)
7520     // Sometimes the setcc is introduced after select_cc has been formed.
7521     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7522         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7523       // If we're looking for eq 0 instead of ne 0, we need to invert the
7524       // condition.
7525       bool Invert = CCVal == ISD::SETEQ;
7526       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7527       if (Invert)
7528         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7529 
7530       SDLoc DL(N);
7531       RHS = LHS.getOperand(1);
7532       LHS = LHS.getOperand(0);
7533       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7534 
7535       SDValue TargetCC = DAG.getCondCode(CCVal);
7536       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7537                          {LHS, RHS, TargetCC, TrueV, FalseV});
7538     }
7539 
7540     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
7541     //      (select_cc X, Y, eq/ne, trueV, falseV)
7542     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7543       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
7544                          {LHS.getOperand(0), LHS.getOperand(1),
7545                           N->getOperand(2), TrueV, FalseV});
7546     // (select_cc X, 1, setne, trueV, falseV) ->
7547     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
7548     // This can occur when legalizing some floating point comparisons.
7549     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7550     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7551       SDLoc DL(N);
7552       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7553       SDValue TargetCC = DAG.getCondCode(CCVal);
7554       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7555       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7556                          {LHS, RHS, TargetCC, TrueV, FalseV});
7557     }
7558 
7559     break;
7560   }
7561   case RISCVISD::BR_CC: {
7562     SDValue LHS = N->getOperand(1);
7563     SDValue RHS = N->getOperand(2);
7564     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
7565     if (!ISD::isIntEqualitySetCC(CCVal))
7566       break;
7567 
7568     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
7569     //      (br_cc X, Y, lt, dest)
7570     // Sometimes the setcc is introduced after br_cc has been formed.
7571     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7572         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7573       // If we're looking for eq 0 instead of ne 0, we need to invert the
7574       // condition.
7575       bool Invert = CCVal == ISD::SETEQ;
7576       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7577       if (Invert)
7578         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7579 
7580       SDLoc DL(N);
7581       RHS = LHS.getOperand(1);
7582       LHS = LHS.getOperand(0);
7583       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7584 
7585       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7586                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
7587                          N->getOperand(4));
7588     }
7589 
7590     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
7591     //      (br_cc X, Y, eq/ne, trueV, falseV)
7592     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7593       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
7594                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
7595                          N->getOperand(3), N->getOperand(4));
7596 
7597     // (br_cc X, 1, setne, br_cc) ->
7598     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
7599     // This can occur when legalizing some floating point comparisons.
7600     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7601     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7602       SDLoc DL(N);
7603       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7604       SDValue TargetCC = DAG.getCondCode(CCVal);
7605       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7606       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7607                          N->getOperand(0), LHS, RHS, TargetCC,
7608                          N->getOperand(4));
7609     }
7610     break;
7611   }
7612   case ISD::FP_TO_SINT:
7613   case ISD::FP_TO_UINT:
7614     return performFP_TO_INTCombine(N, DCI, Subtarget);
7615   case ISD::FP_TO_SINT_SAT:
7616   case ISD::FP_TO_UINT_SAT:
7617     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
7618   case ISD::FCOPYSIGN: {
7619     EVT VT = N->getValueType(0);
7620     if (!VT.isVector())
7621       break;
7622     // There is a form of VFSGNJ which injects the negated sign of its second
7623     // operand. Try and bubble any FNEG up after the extend/round to produce
7624     // this optimized pattern. Avoid modifying cases where FP_ROUND and
7625     // TRUNC=1.
7626     SDValue In2 = N->getOperand(1);
7627     // Avoid cases where the extend/round has multiple uses, as duplicating
7628     // those is typically more expensive than removing a fneg.
7629     if (!In2.hasOneUse())
7630       break;
7631     if (In2.getOpcode() != ISD::FP_EXTEND &&
7632         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
7633       break;
7634     In2 = In2.getOperand(0);
7635     if (In2.getOpcode() != ISD::FNEG)
7636       break;
7637     SDLoc DL(N);
7638     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7639     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7640                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7641   }
7642   case ISD::MGATHER:
7643   case ISD::MSCATTER:
7644   case ISD::VP_GATHER:
7645   case ISD::VP_SCATTER: {
7646     if (!DCI.isBeforeLegalize())
7647       break;
7648     SDValue Index, ScaleOp;
7649     bool IsIndexScaled = false;
7650     bool IsIndexSigned = false;
7651     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7652       Index = VPGSN->getIndex();
7653       ScaleOp = VPGSN->getScale();
7654       IsIndexScaled = VPGSN->isIndexScaled();
7655       IsIndexSigned = VPGSN->isIndexSigned();
7656     } else {
7657       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7658       Index = MGSN->getIndex();
7659       ScaleOp = MGSN->getScale();
7660       IsIndexScaled = MGSN->isIndexScaled();
7661       IsIndexSigned = MGSN->isIndexSigned();
7662     }
7663     EVT IndexVT = Index.getValueType();
7664     MVT XLenVT = Subtarget.getXLenVT();
7665     // RISCV indexed loads only support the "unsigned unscaled" addressing
7666     // mode, so anything else must be manually legalized.
7667     bool NeedsIdxLegalization =
7668         IsIndexScaled ||
7669         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7670     if (!NeedsIdxLegalization)
7671       break;
7672 
7673     SDLoc DL(N);
7674 
7675     // Any index legalization should first promote to XLenVT, so we don't lose
7676     // bits when scaling. This may create an illegal index type so we let
7677     // LLVM's legalization take care of the splitting.
7678     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7679     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7680       IndexVT = IndexVT.changeVectorElementType(XLenVT);
7681       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7682                           DL, IndexVT, Index);
7683     }
7684 
7685     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7686     if (IsIndexScaled && Scale != 1) {
7687       // Manually scale the indices by the element size.
7688       // TODO: Sanitize the scale operand here?
7689       // TODO: For VP nodes, should we use VP_SHL here?
7690       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
7691       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7692       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7693     }
7694 
7695     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7696     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7697       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7698                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
7699                               VPGN->getScale(), VPGN->getMask(),
7700                               VPGN->getVectorLength()},
7701                              VPGN->getMemOperand(), NewIndexTy);
7702     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7703       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7704                               {VPSN->getChain(), VPSN->getValue(),
7705                                VPSN->getBasePtr(), Index, VPSN->getScale(),
7706                                VPSN->getMask(), VPSN->getVectorLength()},
7707                               VPSN->getMemOperand(), NewIndexTy);
7708     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7709       return DAG.getMaskedGather(
7710           N->getVTList(), MGN->getMemoryVT(), DL,
7711           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7712            MGN->getBasePtr(), Index, MGN->getScale()},
7713           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7714     const auto *MSN = cast<MaskedScatterSDNode>(N);
7715     return DAG.getMaskedScatter(
7716         N->getVTList(), MSN->getMemoryVT(), DL,
7717         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7718          Index, MSN->getScale()},
7719         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7720   }
7721   case RISCVISD::SRA_VL:
7722   case RISCVISD::SRL_VL:
7723   case RISCVISD::SHL_VL: {
7724     SDValue ShAmt = N->getOperand(1);
7725     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7726       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7727       SDLoc DL(N);
7728       SDValue VL = N->getOperand(3);
7729       EVT VT = N->getValueType(0);
7730       ShAmt =
7731           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7732       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7733                          N->getOperand(2), N->getOperand(3));
7734     }
7735     break;
7736   }
7737   case ISD::SRA:
7738   case ISD::SRL:
7739   case ISD::SHL: {
7740     SDValue ShAmt = N->getOperand(1);
7741     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7742       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7743       SDLoc DL(N);
7744       EVT VT = N->getValueType(0);
7745       ShAmt =
7746           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7747       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7748     }
7749     break;
7750   }
7751   case RISCVISD::MUL_VL: {
7752     SDValue Op0 = N->getOperand(0);
7753     SDValue Op1 = N->getOperand(1);
7754     if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
7755       return V;
7756     if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
7757       return V;
7758     return SDValue();
7759   }
7760   case ISD::STORE: {
7761     auto *Store = cast<StoreSDNode>(N);
7762     SDValue Val = Store->getValue();
7763     // Combine store of vmv.x.s to vse with VL of 1.
7764     // FIXME: Support FP.
7765     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
7766       SDValue Src = Val.getOperand(0);
7767       EVT VecVT = Src.getValueType();
7768       EVT MemVT = Store->getMemoryVT();
7769       // The memory VT and the element type must match.
7770       if (VecVT.getVectorElementType() == MemVT) {
7771         SDLoc DL(N);
7772         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7773         return DAG.getStoreVP(
7774             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
7775             DAG.getConstant(1, DL, MaskVT),
7776             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
7777             Store->getMemOperand(), Store->getAddressingMode(),
7778             Store->isTruncatingStore(), /*IsCompress*/ false);
7779       }
7780     }
7781 
7782     break;
7783   }
7784   }
7785 
7786   return SDValue();
7787 }
7788 
7789 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
7790     const SDNode *N, CombineLevel Level) const {
7791   // The following folds are only desirable if `(OP _, c1 << c2)` can be
7792   // materialised in fewer instructions than `(OP _, c1)`:
7793   //
7794   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7795   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
7796   SDValue N0 = N->getOperand(0);
7797   EVT Ty = N0.getValueType();
7798   if (Ty.isScalarInteger() &&
7799       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
7800     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7801     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
7802     if (C1 && C2) {
7803       const APInt &C1Int = C1->getAPIntValue();
7804       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
7805 
7806       // We can materialise `c1 << c2` into an add immediate, so it's "free",
7807       // and the combine should happen, to potentially allow further combines
7808       // later.
7809       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
7810           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
7811         return true;
7812 
7813       // We can materialise `c1` in an add immediate, so it's "free", and the
7814       // combine should be prevented.
7815       if (C1Int.getMinSignedBits() <= 64 &&
7816           isLegalAddImmediate(C1Int.getSExtValue()))
7817         return false;
7818 
7819       // Neither constant will fit into an immediate, so find materialisation
7820       // costs.
7821       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
7822                                               Subtarget.getFeatureBits(),
7823                                               /*CompressionCost*/true);
7824       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
7825           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
7826           /*CompressionCost*/true);
7827 
7828       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
7829       // combine should be prevented.
7830       if (C1Cost < ShiftedC1Cost)
7831         return false;
7832     }
7833   }
7834   return true;
7835 }
7836 
7837 bool RISCVTargetLowering::targetShrinkDemandedConstant(
7838     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7839     TargetLoweringOpt &TLO) const {
7840   // Delay this optimization as late as possible.
7841   if (!TLO.LegalOps)
7842     return false;
7843 
7844   EVT VT = Op.getValueType();
7845   if (VT.isVector())
7846     return false;
7847 
7848   // Only handle AND for now.
7849   if (Op.getOpcode() != ISD::AND)
7850     return false;
7851 
7852   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
7853   if (!C)
7854     return false;
7855 
7856   const APInt &Mask = C->getAPIntValue();
7857 
7858   // Clear all non-demanded bits initially.
7859   APInt ShrunkMask = Mask & DemandedBits;
7860 
7861   // Try to make a smaller immediate by setting undemanded bits.
7862 
7863   APInt ExpandedMask = Mask | ~DemandedBits;
7864 
7865   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
7866     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
7867   };
7868   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
7869     if (NewMask == Mask)
7870       return true;
7871     SDLoc DL(Op);
7872     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
7873     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
7874     return TLO.CombineTo(Op, NewOp);
7875   };
7876 
7877   // If the shrunk mask fits in sign extended 12 bits, let the target
7878   // independent code apply it.
7879   if (ShrunkMask.isSignedIntN(12))
7880     return false;
7881 
7882   // Preserve (and X, 0xffff) when zext.h is supported.
7883   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
7884     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
7885     if (IsLegalMask(NewMask))
7886       return UseMask(NewMask);
7887   }
7888 
7889   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
7890   if (VT == MVT::i64) {
7891     APInt NewMask = APInt(64, 0xffffffff);
7892     if (IsLegalMask(NewMask))
7893       return UseMask(NewMask);
7894   }
7895 
7896   // For the remaining optimizations, we need to be able to make a negative
7897   // number through a combination of mask and undemanded bits.
7898   if (!ExpandedMask.isNegative())
7899     return false;
7900 
7901   // What is the fewest number of bits we need to represent the negative number.
7902   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
7903 
7904   // Try to make a 12 bit negative immediate. If that fails try to make a 32
7905   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
7906   APInt NewMask = ShrunkMask;
7907   if (MinSignedBits <= 12)
7908     NewMask.setBitsFrom(11);
7909   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
7910     NewMask.setBitsFrom(31);
7911   else
7912     return false;
7913 
7914   // Check that our new mask is a subset of the demanded mask.
7915   assert(IsLegalMask(NewMask));
7916   return UseMask(NewMask);
7917 }
7918 
7919 static void computeGREV(APInt &Src, unsigned ShAmt) {
7920   ShAmt &= Src.getBitWidth() - 1;
7921   uint64_t x = Src.getZExtValue();
7922   if (ShAmt & 1)
7923     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
7924   if (ShAmt & 2)
7925     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
7926   if (ShAmt & 4)
7927     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
7928   if (ShAmt & 8)
7929     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
7930   if (ShAmt & 16)
7931     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
7932   if (ShAmt & 32)
7933     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
7934   Src = x;
7935 }
7936 
7937 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
7938                                                         KnownBits &Known,
7939                                                         const APInt &DemandedElts,
7940                                                         const SelectionDAG &DAG,
7941                                                         unsigned Depth) const {
7942   unsigned BitWidth = Known.getBitWidth();
7943   unsigned Opc = Op.getOpcode();
7944   assert((Opc >= ISD::BUILTIN_OP_END ||
7945           Opc == ISD::INTRINSIC_WO_CHAIN ||
7946           Opc == ISD::INTRINSIC_W_CHAIN ||
7947           Opc == ISD::INTRINSIC_VOID) &&
7948          "Should use MaskedValueIsZero if you don't know whether Op"
7949          " is a target node!");
7950 
7951   Known.resetAll();
7952   switch (Opc) {
7953   default: break;
7954   case RISCVISD::SELECT_CC: {
7955     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
7956     // If we don't know any bits, early out.
7957     if (Known.isUnknown())
7958       break;
7959     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
7960 
7961     // Only known if known in both the LHS and RHS.
7962     Known = KnownBits::commonBits(Known, Known2);
7963     break;
7964   }
7965   case RISCVISD::REMUW: {
7966     KnownBits Known2;
7967     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7968     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7969     // We only care about the lower 32 bits.
7970     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
7971     // Restore the original width by sign extending.
7972     Known = Known.sext(BitWidth);
7973     break;
7974   }
7975   case RISCVISD::DIVUW: {
7976     KnownBits Known2;
7977     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7978     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7979     // We only care about the lower 32 bits.
7980     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
7981     // Restore the original width by sign extending.
7982     Known = Known.sext(BitWidth);
7983     break;
7984   }
7985   case RISCVISD::CTZW: {
7986     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7987     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
7988     unsigned LowBits = Log2_32(PossibleTZ) + 1;
7989     Known.Zero.setBitsFrom(LowBits);
7990     break;
7991   }
7992   case RISCVISD::CLZW: {
7993     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7994     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
7995     unsigned LowBits = Log2_32(PossibleLZ) + 1;
7996     Known.Zero.setBitsFrom(LowBits);
7997     break;
7998   }
7999   case RISCVISD::GREV:
8000   case RISCVISD::GREVW: {
8001     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8002       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8003       if (Opc == RISCVISD::GREVW)
8004         Known = Known.trunc(32);
8005       unsigned ShAmt = C->getZExtValue();
8006       computeGREV(Known.Zero, ShAmt);
8007       computeGREV(Known.One, ShAmt);
8008       if (Opc == RISCVISD::GREVW)
8009         Known = Known.sext(BitWidth);
8010     }
8011     break;
8012   }
8013   case RISCVISD::READ_VLENB:
8014     // We assume VLENB is at least 16 bytes.
8015     Known.Zero.setLowBits(4);
8016     // We assume VLENB is no more than 65536 / 8 bytes.
8017     Known.Zero.setBitsFrom(14);
8018     break;
8019   case ISD::INTRINSIC_W_CHAIN: {
8020     unsigned IntNo = Op.getConstantOperandVal(1);
8021     switch (IntNo) {
8022     default:
8023       // We can't do anything for most intrinsics.
8024       break;
8025     case Intrinsic::riscv_vsetvli:
8026     case Intrinsic::riscv_vsetvlimax:
8027       // Assume that VL output is positive and would fit in an int32_t.
8028       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8029       if (BitWidth >= 32)
8030         Known.Zero.setBitsFrom(31);
8031       break;
8032     }
8033     break;
8034   }
8035   }
8036 }
8037 
8038 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8039     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8040     unsigned Depth) const {
8041   switch (Op.getOpcode()) {
8042   default:
8043     break;
8044   case RISCVISD::SELECT_CC: {
8045     unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8046     if (Tmp == 1) return 1;  // Early out.
8047     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8048     return std::min(Tmp, Tmp2);
8049   }
8050   case RISCVISD::SLLW:
8051   case RISCVISD::SRAW:
8052   case RISCVISD::SRLW:
8053   case RISCVISD::DIVW:
8054   case RISCVISD::DIVUW:
8055   case RISCVISD::REMUW:
8056   case RISCVISD::ROLW:
8057   case RISCVISD::RORW:
8058   case RISCVISD::GREVW:
8059   case RISCVISD::GORCW:
8060   case RISCVISD::FSLW:
8061   case RISCVISD::FSRW:
8062   case RISCVISD::SHFLW:
8063   case RISCVISD::UNSHFLW:
8064   case RISCVISD::BCOMPRESSW:
8065   case RISCVISD::BDECOMPRESSW:
8066   case RISCVISD::BFPW:
8067   case RISCVISD::FCVT_W_RV64:
8068   case RISCVISD::FCVT_WU_RV64:
8069   case RISCVISD::STRICT_FCVT_W_RV64:
8070   case RISCVISD::STRICT_FCVT_WU_RV64:
8071     // TODO: As the result is sign-extended, this is conservatively correct. A
8072     // more precise answer could be calculated for SRAW depending on known
8073     // bits in the shift amount.
8074     return 33;
8075   case RISCVISD::SHFL:
8076   case RISCVISD::UNSHFL: {
8077     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8078     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8079     // will stay within the upper 32 bits. If there were more than 32 sign bits
8080     // before there will be at least 33 sign bits after.
8081     if (Op.getValueType() == MVT::i64 &&
8082         isa<ConstantSDNode>(Op.getOperand(1)) &&
8083         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8084       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8085       if (Tmp > 32)
8086         return 33;
8087     }
8088     break;
8089   }
8090   case RISCVISD::VMV_X_S:
8091     // The number of sign bits of the scalar result is computed by obtaining the
8092     // element type of the input vector operand, subtracting its width from the
8093     // XLEN, and then adding one (sign bit within the element type). If the
8094     // element type is wider than XLen, the least-significant XLEN bits are
8095     // taken.
8096     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
8097       return 1;
8098     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
8099   }
8100 
8101   return 1;
8102 }
8103 
8104 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8105                                                   MachineBasicBlock *BB) {
8106   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8107 
8108   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8109   // Should the count have wrapped while it was being read, we need to try
8110   // again.
8111   // ...
8112   // read:
8113   // rdcycleh x3 # load high word of cycle
8114   // rdcycle  x2 # load low word of cycle
8115   // rdcycleh x4 # load high word of cycle
8116   // bne x3, x4, read # check if high word reads match, otherwise try again
8117   // ...
8118 
8119   MachineFunction &MF = *BB->getParent();
8120   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8121   MachineFunction::iterator It = ++BB->getIterator();
8122 
8123   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8124   MF.insert(It, LoopMBB);
8125 
8126   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8127   MF.insert(It, DoneMBB);
8128 
8129   // Transfer the remainder of BB and its successor edges to DoneMBB.
8130   DoneMBB->splice(DoneMBB->begin(), BB,
8131                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8132   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8133 
8134   BB->addSuccessor(LoopMBB);
8135 
8136   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8137   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8138   Register LoReg = MI.getOperand(0).getReg();
8139   Register HiReg = MI.getOperand(1).getReg();
8140   DebugLoc DL = MI.getDebugLoc();
8141 
8142   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8143   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8144       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8145       .addReg(RISCV::X0);
8146   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8147       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8148       .addReg(RISCV::X0);
8149   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8150       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8151       .addReg(RISCV::X0);
8152 
8153   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8154       .addReg(HiReg)
8155       .addReg(ReadAgainReg)
8156       .addMBB(LoopMBB);
8157 
8158   LoopMBB->addSuccessor(LoopMBB);
8159   LoopMBB->addSuccessor(DoneMBB);
8160 
8161   MI.eraseFromParent();
8162 
8163   return DoneMBB;
8164 }
8165 
8166 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8167                                              MachineBasicBlock *BB) {
8168   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8169 
8170   MachineFunction &MF = *BB->getParent();
8171   DebugLoc DL = MI.getDebugLoc();
8172   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8173   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8174   Register LoReg = MI.getOperand(0).getReg();
8175   Register HiReg = MI.getOperand(1).getReg();
8176   Register SrcReg = MI.getOperand(2).getReg();
8177   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8178   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8179 
8180   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8181                           RI);
8182   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8183   MachineMemOperand *MMOLo =
8184       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8185   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8186       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8187   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8188       .addFrameIndex(FI)
8189       .addImm(0)
8190       .addMemOperand(MMOLo);
8191   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8192       .addFrameIndex(FI)
8193       .addImm(4)
8194       .addMemOperand(MMOHi);
8195   MI.eraseFromParent(); // The pseudo instruction is gone now.
8196   return BB;
8197 }
8198 
8199 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8200                                                  MachineBasicBlock *BB) {
8201   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8202          "Unexpected instruction");
8203 
8204   MachineFunction &MF = *BB->getParent();
8205   DebugLoc DL = MI.getDebugLoc();
8206   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8207   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8208   Register DstReg = MI.getOperand(0).getReg();
8209   Register LoReg = MI.getOperand(1).getReg();
8210   Register HiReg = MI.getOperand(2).getReg();
8211   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8212   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8213 
8214   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8215   MachineMemOperand *MMOLo =
8216       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8217   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8218       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8219   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8220       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8221       .addFrameIndex(FI)
8222       .addImm(0)
8223       .addMemOperand(MMOLo);
8224   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8225       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8226       .addFrameIndex(FI)
8227       .addImm(4)
8228       .addMemOperand(MMOHi);
8229   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8230   MI.eraseFromParent(); // The pseudo instruction is gone now.
8231   return BB;
8232 }
8233 
8234 static bool isSelectPseudo(MachineInstr &MI) {
8235   switch (MI.getOpcode()) {
8236   default:
8237     return false;
8238   case RISCV::Select_GPR_Using_CC_GPR:
8239   case RISCV::Select_FPR16_Using_CC_GPR:
8240   case RISCV::Select_FPR32_Using_CC_GPR:
8241   case RISCV::Select_FPR64_Using_CC_GPR:
8242     return true;
8243   }
8244 }
8245 
8246 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8247                                         unsigned RelOpcode, unsigned EqOpcode,
8248                                         const RISCVSubtarget &Subtarget) {
8249   DebugLoc DL = MI.getDebugLoc();
8250   Register DstReg = MI.getOperand(0).getReg();
8251   Register Src1Reg = MI.getOperand(1).getReg();
8252   Register Src2Reg = MI.getOperand(2).getReg();
8253   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8254   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8255   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8256 
8257   // Save the current FFLAGS.
8258   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8259 
8260   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8261                  .addReg(Src1Reg)
8262                  .addReg(Src2Reg);
8263   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8264     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8265 
8266   // Restore the FFLAGS.
8267   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8268       .addReg(SavedFFlags, RegState::Kill);
8269 
8270   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
8271   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8272                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8273                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
8274   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8275     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
8276 
8277   // Erase the pseudoinstruction.
8278   MI.eraseFromParent();
8279   return BB;
8280 }
8281 
8282 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
8283                                            MachineBasicBlock *BB,
8284                                            const RISCVSubtarget &Subtarget) {
8285   // To "insert" Select_* instructions, we actually have to insert the triangle
8286   // control-flow pattern.  The incoming instructions know the destination vreg
8287   // to set, the condition code register to branch on, the true/false values to
8288   // select between, and the condcode to use to select the appropriate branch.
8289   //
8290   // We produce the following control flow:
8291   //     HeadMBB
8292   //     |  \
8293   //     |  IfFalseMBB
8294   //     | /
8295   //    TailMBB
8296   //
8297   // When we find a sequence of selects we attempt to optimize their emission
8298   // by sharing the control flow. Currently we only handle cases where we have
8299   // multiple selects with the exact same condition (same LHS, RHS and CC).
8300   // The selects may be interleaved with other instructions if the other
8301   // instructions meet some requirements we deem safe:
8302   // - They are debug instructions. Otherwise,
8303   // - They do not have side-effects, do not access memory and their inputs do
8304   //   not depend on the results of the select pseudo-instructions.
8305   // The TrueV/FalseV operands of the selects cannot depend on the result of
8306   // previous selects in the sequence.
8307   // These conditions could be further relaxed. See the X86 target for a
8308   // related approach and more information.
8309   Register LHS = MI.getOperand(1).getReg();
8310   Register RHS = MI.getOperand(2).getReg();
8311   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
8312 
8313   SmallVector<MachineInstr *, 4> SelectDebugValues;
8314   SmallSet<Register, 4> SelectDests;
8315   SelectDests.insert(MI.getOperand(0).getReg());
8316 
8317   MachineInstr *LastSelectPseudo = &MI;
8318 
8319   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
8320        SequenceMBBI != E; ++SequenceMBBI) {
8321     if (SequenceMBBI->isDebugInstr())
8322       continue;
8323     else if (isSelectPseudo(*SequenceMBBI)) {
8324       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8325           SequenceMBBI->getOperand(2).getReg() != RHS ||
8326           SequenceMBBI->getOperand(3).getImm() != CC ||
8327           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8328           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8329         break;
8330       LastSelectPseudo = &*SequenceMBBI;
8331       SequenceMBBI->collectDebugValues(SelectDebugValues);
8332       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8333     } else {
8334       if (SequenceMBBI->hasUnmodeledSideEffects() ||
8335           SequenceMBBI->mayLoadOrStore())
8336         break;
8337       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
8338             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8339           }))
8340         break;
8341     }
8342   }
8343 
8344   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
8345   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8346   DebugLoc DL = MI.getDebugLoc();
8347   MachineFunction::iterator I = ++BB->getIterator();
8348 
8349   MachineBasicBlock *HeadMBB = BB;
8350   MachineFunction *F = BB->getParent();
8351   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
8352   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
8353 
8354   F->insert(I, IfFalseMBB);
8355   F->insert(I, TailMBB);
8356 
8357   // Transfer debug instructions associated with the selects to TailMBB.
8358   for (MachineInstr *DebugInstr : SelectDebugValues) {
8359     TailMBB->push_back(DebugInstr->removeFromParent());
8360   }
8361 
8362   // Move all instructions after the sequence to TailMBB.
8363   TailMBB->splice(TailMBB->end(), HeadMBB,
8364                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
8365   // Update machine-CFG edges by transferring all successors of the current
8366   // block to the new block which will contain the Phi nodes for the selects.
8367   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
8368   // Set the successors for HeadMBB.
8369   HeadMBB->addSuccessor(IfFalseMBB);
8370   HeadMBB->addSuccessor(TailMBB);
8371 
8372   // Insert appropriate branch.
8373   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
8374     .addReg(LHS)
8375     .addReg(RHS)
8376     .addMBB(TailMBB);
8377 
8378   // IfFalseMBB just falls through to TailMBB.
8379   IfFalseMBB->addSuccessor(TailMBB);
8380 
8381   // Create PHIs for all of the select pseudo-instructions.
8382   auto SelectMBBI = MI.getIterator();
8383   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8384   auto InsertionPoint = TailMBB->begin();
8385   while (SelectMBBI != SelectEnd) {
8386     auto Next = std::next(SelectMBBI);
8387     if (isSelectPseudo(*SelectMBBI)) {
8388       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
8389       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
8390               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
8391           .addReg(SelectMBBI->getOperand(4).getReg())
8392           .addMBB(HeadMBB)
8393           .addReg(SelectMBBI->getOperand(5).getReg())
8394           .addMBB(IfFalseMBB);
8395       SelectMBBI->eraseFromParent();
8396     }
8397     SelectMBBI = Next;
8398   }
8399 
8400   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
8401   return TailMBB;
8402 }
8403 
8404 MachineBasicBlock *
8405 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8406                                                  MachineBasicBlock *BB) const {
8407   switch (MI.getOpcode()) {
8408   default:
8409     llvm_unreachable("Unexpected instr type to insert");
8410   case RISCV::ReadCycleWide:
8411     assert(!Subtarget.is64Bit() &&
8412            "ReadCycleWrite is only to be used on riscv32");
8413     return emitReadCycleWidePseudo(MI, BB);
8414   case RISCV::Select_GPR_Using_CC_GPR:
8415   case RISCV::Select_FPR16_Using_CC_GPR:
8416   case RISCV::Select_FPR32_Using_CC_GPR:
8417   case RISCV::Select_FPR64_Using_CC_GPR:
8418     return emitSelectPseudo(MI, BB, Subtarget);
8419   case RISCV::BuildPairF64Pseudo:
8420     return emitBuildPairF64Pseudo(MI, BB);
8421   case RISCV::SplitF64Pseudo:
8422     return emitSplitF64Pseudo(MI, BB);
8423   case RISCV::PseudoQuietFLE_H:
8424     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
8425   case RISCV::PseudoQuietFLT_H:
8426     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
8427   case RISCV::PseudoQuietFLE_S:
8428     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
8429   case RISCV::PseudoQuietFLT_S:
8430     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
8431   case RISCV::PseudoQuietFLE_D:
8432     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
8433   case RISCV::PseudoQuietFLT_D:
8434     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
8435   }
8436 }
8437 
8438 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8439                                                         SDNode *Node) const {
8440   // Add FRM dependency to any instructions with dynamic rounding mode.
8441   unsigned Opc = MI.getOpcode();
8442   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
8443   if (Idx < 0)
8444     return;
8445   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
8446     return;
8447   // If the instruction already reads FRM, don't add another read.
8448   if (MI.readsRegister(RISCV::FRM))
8449     return;
8450   MI.addOperand(
8451       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
8452 }
8453 
8454 // Calling Convention Implementation.
8455 // The expectations for frontend ABI lowering vary from target to target.
8456 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
8457 // details, but this is a longer term goal. For now, we simply try to keep the
8458 // role of the frontend as simple and well-defined as possible. The rules can
8459 // be summarised as:
8460 // * Never split up large scalar arguments. We handle them here.
8461 // * If a hardfloat calling convention is being used, and the struct may be
8462 // passed in a pair of registers (fp+fp, int+fp), and both registers are
8463 // available, then pass as two separate arguments. If either the GPRs or FPRs
8464 // are exhausted, then pass according to the rule below.
8465 // * If a struct could never be passed in registers or directly in a stack
8466 // slot (as it is larger than 2*XLEN and the floating point rules don't
8467 // apply), then pass it using a pointer with the byval attribute.
8468 // * If a struct is less than 2*XLEN, then coerce to either a two-element
8469 // word-sized array or a 2*XLEN scalar (depending on alignment).
8470 // * The frontend can determine whether a struct is returned by reference or
8471 // not based on its size and fields. If it will be returned by reference, the
8472 // frontend must modify the prototype so a pointer with the sret annotation is
8473 // passed as the first argument. This is not necessary for large scalar
8474 // returns.
8475 // * Struct return values and varargs should be coerced to structs containing
8476 // register-size fields in the same situations they would be for fixed
8477 // arguments.
8478 
8479 static const MCPhysReg ArgGPRs[] = {
8480   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
8481   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
8482 };
8483 static const MCPhysReg ArgFPR16s[] = {
8484   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
8485   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
8486 };
8487 static const MCPhysReg ArgFPR32s[] = {
8488   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
8489   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
8490 };
8491 static const MCPhysReg ArgFPR64s[] = {
8492   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
8493   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
8494 };
8495 // This is an interim calling convention and it may be changed in the future.
8496 static const MCPhysReg ArgVRs[] = {
8497     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
8498     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
8499     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
8500 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
8501                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
8502                                      RISCV::V20M2, RISCV::V22M2};
8503 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
8504                                      RISCV::V20M4};
8505 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
8506 
8507 // Pass a 2*XLEN argument that has been split into two XLEN values through
8508 // registers or the stack as necessary.
8509 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
8510                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
8511                                 MVT ValVT2, MVT LocVT2,
8512                                 ISD::ArgFlagsTy ArgFlags2) {
8513   unsigned XLenInBytes = XLen / 8;
8514   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8515     // At least one half can be passed via register.
8516     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
8517                                      VA1.getLocVT(), CCValAssign::Full));
8518   } else {
8519     // Both halves must be passed on the stack, with proper alignment.
8520     Align StackAlign =
8521         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
8522     State.addLoc(
8523         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
8524                             State.AllocateStack(XLenInBytes, StackAlign),
8525                             VA1.getLocVT(), CCValAssign::Full));
8526     State.addLoc(CCValAssign::getMem(
8527         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8528         LocVT2, CCValAssign::Full));
8529     return false;
8530   }
8531 
8532   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8533     // The second half can also be passed via register.
8534     State.addLoc(
8535         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
8536   } else {
8537     // The second half is passed via the stack, without additional alignment.
8538     State.addLoc(CCValAssign::getMem(
8539         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8540         LocVT2, CCValAssign::Full));
8541   }
8542 
8543   return false;
8544 }
8545 
8546 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
8547                                Optional<unsigned> FirstMaskArgument,
8548                                CCState &State, const RISCVTargetLowering &TLI) {
8549   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
8550   if (RC == &RISCV::VRRegClass) {
8551     // Assign the first mask argument to V0.
8552     // This is an interim calling convention and it may be changed in the
8553     // future.
8554     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
8555       return State.AllocateReg(RISCV::V0);
8556     return State.AllocateReg(ArgVRs);
8557   }
8558   if (RC == &RISCV::VRM2RegClass)
8559     return State.AllocateReg(ArgVRM2s);
8560   if (RC == &RISCV::VRM4RegClass)
8561     return State.AllocateReg(ArgVRM4s);
8562   if (RC == &RISCV::VRM8RegClass)
8563     return State.AllocateReg(ArgVRM8s);
8564   llvm_unreachable("Unhandled register class for ValueType");
8565 }
8566 
8567 // Implements the RISC-V calling convention. Returns true upon failure.
8568 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
8569                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
8570                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
8571                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
8572                      Optional<unsigned> FirstMaskArgument) {
8573   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
8574   assert(XLen == 32 || XLen == 64);
8575   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
8576 
8577   // Any return value split in to more than two values can't be returned
8578   // directly. Vectors are returned via the available vector registers.
8579   if (!LocVT.isVector() && IsRet && ValNo > 1)
8580     return true;
8581 
8582   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
8583   // variadic argument, or if no F16/F32 argument registers are available.
8584   bool UseGPRForF16_F32 = true;
8585   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
8586   // variadic argument, or if no F64 argument registers are available.
8587   bool UseGPRForF64 = true;
8588 
8589   switch (ABI) {
8590   default:
8591     llvm_unreachable("Unexpected ABI");
8592   case RISCVABI::ABI_ILP32:
8593   case RISCVABI::ABI_LP64:
8594     break;
8595   case RISCVABI::ABI_ILP32F:
8596   case RISCVABI::ABI_LP64F:
8597     UseGPRForF16_F32 = !IsFixed;
8598     break;
8599   case RISCVABI::ABI_ILP32D:
8600   case RISCVABI::ABI_LP64D:
8601     UseGPRForF16_F32 = !IsFixed;
8602     UseGPRForF64 = !IsFixed;
8603     break;
8604   }
8605 
8606   // FPR16, FPR32, and FPR64 alias each other.
8607   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
8608     UseGPRForF16_F32 = true;
8609     UseGPRForF64 = true;
8610   }
8611 
8612   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
8613   // similar local variables rather than directly checking against the target
8614   // ABI.
8615 
8616   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
8617     LocVT = XLenVT;
8618     LocInfo = CCValAssign::BCvt;
8619   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
8620     LocVT = MVT::i64;
8621     LocInfo = CCValAssign::BCvt;
8622   }
8623 
8624   // If this is a variadic argument, the RISC-V calling convention requires
8625   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
8626   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
8627   // be used regardless of whether the original argument was split during
8628   // legalisation or not. The argument will not be passed by registers if the
8629   // original type is larger than 2*XLEN, so the register alignment rule does
8630   // not apply.
8631   unsigned TwoXLenInBytes = (2 * XLen) / 8;
8632   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
8633       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
8634     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
8635     // Skip 'odd' register if necessary.
8636     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
8637       State.AllocateReg(ArgGPRs);
8638   }
8639 
8640   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
8641   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
8642       State.getPendingArgFlags();
8643 
8644   assert(PendingLocs.size() == PendingArgFlags.size() &&
8645          "PendingLocs and PendingArgFlags out of sync");
8646 
8647   // Handle passing f64 on RV32D with a soft float ABI or when floating point
8648   // registers are exhausted.
8649   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
8650     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
8651            "Can't lower f64 if it is split");
8652     // Depending on available argument GPRS, f64 may be passed in a pair of
8653     // GPRs, split between a GPR and the stack, or passed completely on the
8654     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
8655     // cases.
8656     Register Reg = State.AllocateReg(ArgGPRs);
8657     LocVT = MVT::i32;
8658     if (!Reg) {
8659       unsigned StackOffset = State.AllocateStack(8, Align(8));
8660       State.addLoc(
8661           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8662       return false;
8663     }
8664     if (!State.AllocateReg(ArgGPRs))
8665       State.AllocateStack(4, Align(4));
8666     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8667     return false;
8668   }
8669 
8670   // Fixed-length vectors are located in the corresponding scalable-vector
8671   // container types.
8672   if (ValVT.isFixedLengthVector())
8673     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8674 
8675   // Split arguments might be passed indirectly, so keep track of the pending
8676   // values. Split vectors are passed via a mix of registers and indirectly, so
8677   // treat them as we would any other argument.
8678   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
8679     LocVT = XLenVT;
8680     LocInfo = CCValAssign::Indirect;
8681     PendingLocs.push_back(
8682         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
8683     PendingArgFlags.push_back(ArgFlags);
8684     if (!ArgFlags.isSplitEnd()) {
8685       return false;
8686     }
8687   }
8688 
8689   // If the split argument only had two elements, it should be passed directly
8690   // in registers or on the stack.
8691   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
8692       PendingLocs.size() <= 2) {
8693     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
8694     // Apply the normal calling convention rules to the first half of the
8695     // split argument.
8696     CCValAssign VA = PendingLocs[0];
8697     ISD::ArgFlagsTy AF = PendingArgFlags[0];
8698     PendingLocs.clear();
8699     PendingArgFlags.clear();
8700     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
8701                                ArgFlags);
8702   }
8703 
8704   // Allocate to a register if possible, or else a stack slot.
8705   Register Reg;
8706   unsigned StoreSizeBytes = XLen / 8;
8707   Align StackAlign = Align(XLen / 8);
8708 
8709   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8710     Reg = State.AllocateReg(ArgFPR16s);
8711   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8712     Reg = State.AllocateReg(ArgFPR32s);
8713   else if (ValVT == MVT::f64 && !UseGPRForF64)
8714     Reg = State.AllocateReg(ArgFPR64s);
8715   else if (ValVT.isVector()) {
8716     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8717     if (!Reg) {
8718       // For return values, the vector must be passed fully via registers or
8719       // via the stack.
8720       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8721       // but we're using all of them.
8722       if (IsRet)
8723         return true;
8724       // Try using a GPR to pass the address
8725       if ((Reg = State.AllocateReg(ArgGPRs))) {
8726         LocVT = XLenVT;
8727         LocInfo = CCValAssign::Indirect;
8728       } else if (ValVT.isScalableVector()) {
8729         LocVT = XLenVT;
8730         LocInfo = CCValAssign::Indirect;
8731       } else {
8732         // Pass fixed-length vectors on the stack.
8733         LocVT = ValVT;
8734         StoreSizeBytes = ValVT.getStoreSize();
8735         // Align vectors to their element sizes, being careful for vXi1
8736         // vectors.
8737         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8738       }
8739     }
8740   } else {
8741     Reg = State.AllocateReg(ArgGPRs);
8742   }
8743 
8744   unsigned StackOffset =
8745       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8746 
8747   // If we reach this point and PendingLocs is non-empty, we must be at the
8748   // end of a split argument that must be passed indirectly.
8749   if (!PendingLocs.empty()) {
8750     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
8751     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
8752 
8753     for (auto &It : PendingLocs) {
8754       if (Reg)
8755         It.convertToReg(Reg);
8756       else
8757         It.convertToMem(StackOffset);
8758       State.addLoc(It);
8759     }
8760     PendingLocs.clear();
8761     PendingArgFlags.clear();
8762     return false;
8763   }
8764 
8765   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
8766           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
8767          "Expected an XLenVT or vector types at this stage");
8768 
8769   if (Reg) {
8770     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8771     return false;
8772   }
8773 
8774   // When a floating-point value is passed on the stack, no bit-conversion is
8775   // needed.
8776   if (ValVT.isFloatingPoint()) {
8777     LocVT = ValVT;
8778     LocInfo = CCValAssign::Full;
8779   }
8780   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8781   return false;
8782 }
8783 
8784 template <typename ArgTy>
8785 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
8786   for (const auto &ArgIdx : enumerate(Args)) {
8787     MVT ArgVT = ArgIdx.value().VT;
8788     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
8789       return ArgIdx.index();
8790   }
8791   return None;
8792 }
8793 
8794 void RISCVTargetLowering::analyzeInputArgs(
8795     MachineFunction &MF, CCState &CCInfo,
8796     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
8797     RISCVCCAssignFn Fn) const {
8798   unsigned NumArgs = Ins.size();
8799   FunctionType *FType = MF.getFunction().getFunctionType();
8800 
8801   Optional<unsigned> FirstMaskArgument;
8802   if (Subtarget.hasVInstructions())
8803     FirstMaskArgument = preAssignMask(Ins);
8804 
8805   for (unsigned i = 0; i != NumArgs; ++i) {
8806     MVT ArgVT = Ins[i].VT;
8807     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
8808 
8809     Type *ArgTy = nullptr;
8810     if (IsRet)
8811       ArgTy = FType->getReturnType();
8812     else if (Ins[i].isOrigArg())
8813       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8814 
8815     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8816     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8817            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
8818            FirstMaskArgument)) {
8819       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
8820                         << EVT(ArgVT).getEVTString() << '\n');
8821       llvm_unreachable(nullptr);
8822     }
8823   }
8824 }
8825 
8826 void RISCVTargetLowering::analyzeOutputArgs(
8827     MachineFunction &MF, CCState &CCInfo,
8828     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
8829     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
8830   unsigned NumArgs = Outs.size();
8831 
8832   Optional<unsigned> FirstMaskArgument;
8833   if (Subtarget.hasVInstructions())
8834     FirstMaskArgument = preAssignMask(Outs);
8835 
8836   for (unsigned i = 0; i != NumArgs; i++) {
8837     MVT ArgVT = Outs[i].VT;
8838     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8839     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8840 
8841     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8842     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8843            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
8844            FirstMaskArgument)) {
8845       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
8846                         << EVT(ArgVT).getEVTString() << "\n");
8847       llvm_unreachable(nullptr);
8848     }
8849   }
8850 }
8851 
8852 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
8853 // values.
8854 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
8855                                    const CCValAssign &VA, const SDLoc &DL,
8856                                    const RISCVSubtarget &Subtarget) {
8857   switch (VA.getLocInfo()) {
8858   default:
8859     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8860   case CCValAssign::Full:
8861     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
8862       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
8863     break;
8864   case CCValAssign::BCvt:
8865     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8866       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
8867     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8868       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
8869     else
8870       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
8871     break;
8872   }
8873   return Val;
8874 }
8875 
8876 // The caller is responsible for loading the full value if the argument is
8877 // passed with CCValAssign::Indirect.
8878 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
8879                                 const CCValAssign &VA, const SDLoc &DL,
8880                                 const RISCVTargetLowering &TLI) {
8881   MachineFunction &MF = DAG.getMachineFunction();
8882   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8883   EVT LocVT = VA.getLocVT();
8884   SDValue Val;
8885   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
8886   Register VReg = RegInfo.createVirtualRegister(RC);
8887   RegInfo.addLiveIn(VA.getLocReg(), VReg);
8888   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
8889 
8890   if (VA.getLocInfo() == CCValAssign::Indirect)
8891     return Val;
8892 
8893   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
8894 }
8895 
8896 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
8897                                    const CCValAssign &VA, const SDLoc &DL,
8898                                    const RISCVSubtarget &Subtarget) {
8899   EVT LocVT = VA.getLocVT();
8900 
8901   switch (VA.getLocInfo()) {
8902   default:
8903     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8904   case CCValAssign::Full:
8905     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
8906       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
8907     break;
8908   case CCValAssign::BCvt:
8909     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8910       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
8911     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8912       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
8913     else
8914       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
8915     break;
8916   }
8917   return Val;
8918 }
8919 
8920 // The caller is responsible for loading the full value if the argument is
8921 // passed with CCValAssign::Indirect.
8922 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
8923                                 const CCValAssign &VA, const SDLoc &DL) {
8924   MachineFunction &MF = DAG.getMachineFunction();
8925   MachineFrameInfo &MFI = MF.getFrameInfo();
8926   EVT LocVT = VA.getLocVT();
8927   EVT ValVT = VA.getValVT();
8928   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
8929   if (ValVT.isScalableVector()) {
8930     // When the value is a scalable vector, we save the pointer which points to
8931     // the scalable vector value in the stack. The ValVT will be the pointer
8932     // type, instead of the scalable vector type.
8933     ValVT = LocVT;
8934   }
8935   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
8936                                  /*IsImmutable=*/true);
8937   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
8938   SDValue Val;
8939 
8940   ISD::LoadExtType ExtType;
8941   switch (VA.getLocInfo()) {
8942   default:
8943     llvm_unreachable("Unexpected CCValAssign::LocInfo");
8944   case CCValAssign::Full:
8945   case CCValAssign::Indirect:
8946   case CCValAssign::BCvt:
8947     ExtType = ISD::NON_EXTLOAD;
8948     break;
8949   }
8950   Val = DAG.getExtLoad(
8951       ExtType, DL, LocVT, Chain, FIN,
8952       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
8953   return Val;
8954 }
8955 
8956 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
8957                                        const CCValAssign &VA, const SDLoc &DL) {
8958   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
8959          "Unexpected VA");
8960   MachineFunction &MF = DAG.getMachineFunction();
8961   MachineFrameInfo &MFI = MF.getFrameInfo();
8962   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8963 
8964   if (VA.isMemLoc()) {
8965     // f64 is passed on the stack.
8966     int FI =
8967         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
8968     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8969     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
8970                        MachinePointerInfo::getFixedStack(MF, FI));
8971   }
8972 
8973   assert(VA.isRegLoc() && "Expected register VA assignment");
8974 
8975   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8976   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
8977   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
8978   SDValue Hi;
8979   if (VA.getLocReg() == RISCV::X17) {
8980     // Second half of f64 is passed on the stack.
8981     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
8982     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8983     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
8984                      MachinePointerInfo::getFixedStack(MF, FI));
8985   } else {
8986     // Second half of f64 is passed in another GPR.
8987     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8988     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
8989     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
8990   }
8991   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
8992 }
8993 
8994 // FastCC has less than 1% performance improvement for some particular
8995 // benchmark. But theoretically, it may has benenfit for some cases.
8996 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
8997                             unsigned ValNo, MVT ValVT, MVT LocVT,
8998                             CCValAssign::LocInfo LocInfo,
8999                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9000                             bool IsFixed, bool IsRet, Type *OrigTy,
9001                             const RISCVTargetLowering &TLI,
9002                             Optional<unsigned> FirstMaskArgument) {
9003 
9004   // X5 and X6 might be used for save-restore libcall.
9005   static const MCPhysReg GPRList[] = {
9006       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9007       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9008       RISCV::X29, RISCV::X30, RISCV::X31};
9009 
9010   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9011     if (unsigned Reg = State.AllocateReg(GPRList)) {
9012       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9013       return false;
9014     }
9015   }
9016 
9017   if (LocVT == MVT::f16) {
9018     static const MCPhysReg FPR16List[] = {
9019         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9020         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9021         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9022         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9023     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9024       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9025       return false;
9026     }
9027   }
9028 
9029   if (LocVT == MVT::f32) {
9030     static const MCPhysReg FPR32List[] = {
9031         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9032         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9033         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9034         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9035     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9036       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9037       return false;
9038     }
9039   }
9040 
9041   if (LocVT == MVT::f64) {
9042     static const MCPhysReg FPR64List[] = {
9043         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9044         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9045         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9046         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9047     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9048       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9049       return false;
9050     }
9051   }
9052 
9053   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9054     unsigned Offset4 = State.AllocateStack(4, Align(4));
9055     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9056     return false;
9057   }
9058 
9059   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9060     unsigned Offset5 = State.AllocateStack(8, Align(8));
9061     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9062     return false;
9063   }
9064 
9065   if (LocVT.isVector()) {
9066     if (unsigned Reg =
9067             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9068       // Fixed-length vectors are located in the corresponding scalable-vector
9069       // container types.
9070       if (ValVT.isFixedLengthVector())
9071         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9072       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9073     } else {
9074       // Try and pass the address via a "fast" GPR.
9075       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9076         LocInfo = CCValAssign::Indirect;
9077         LocVT = TLI.getSubtarget().getXLenVT();
9078         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9079       } else if (ValVT.isFixedLengthVector()) {
9080         auto StackAlign =
9081             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9082         unsigned StackOffset =
9083             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9084         State.addLoc(
9085             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9086       } else {
9087         // Can't pass scalable vectors on the stack.
9088         return true;
9089       }
9090     }
9091 
9092     return false;
9093   }
9094 
9095   return true; // CC didn't match.
9096 }
9097 
9098 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9099                          CCValAssign::LocInfo LocInfo,
9100                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9101 
9102   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9103     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9104     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9105     static const MCPhysReg GPRList[] = {
9106         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9107         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9108     if (unsigned Reg = State.AllocateReg(GPRList)) {
9109       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9110       return false;
9111     }
9112   }
9113 
9114   if (LocVT == MVT::f32) {
9115     // Pass in STG registers: F1, ..., F6
9116     //                        fs0 ... fs5
9117     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9118                                           RISCV::F18_F, RISCV::F19_F,
9119                                           RISCV::F20_F, RISCV::F21_F};
9120     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9121       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9122       return false;
9123     }
9124   }
9125 
9126   if (LocVT == MVT::f64) {
9127     // Pass in STG registers: D1, ..., D6
9128     //                        fs6 ... fs11
9129     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9130                                           RISCV::F24_D, RISCV::F25_D,
9131                                           RISCV::F26_D, RISCV::F27_D};
9132     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9133       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9134       return false;
9135     }
9136   }
9137 
9138   report_fatal_error("No registers left in GHC calling convention");
9139   return true;
9140 }
9141 
9142 // Transform physical registers into virtual registers.
9143 SDValue RISCVTargetLowering::LowerFormalArguments(
9144     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9145     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9146     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9147 
9148   MachineFunction &MF = DAG.getMachineFunction();
9149 
9150   switch (CallConv) {
9151   default:
9152     report_fatal_error("Unsupported calling convention");
9153   case CallingConv::C:
9154   case CallingConv::Fast:
9155     break;
9156   case CallingConv::GHC:
9157     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9158         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9159       report_fatal_error(
9160         "GHC calling convention requires the F and D instruction set extensions");
9161   }
9162 
9163   const Function &Func = MF.getFunction();
9164   if (Func.hasFnAttribute("interrupt")) {
9165     if (!Func.arg_empty())
9166       report_fatal_error(
9167         "Functions with the interrupt attribute cannot have arguments!");
9168 
9169     StringRef Kind =
9170       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9171 
9172     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9173       report_fatal_error(
9174         "Function interrupt attribute argument not supported!");
9175   }
9176 
9177   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9178   MVT XLenVT = Subtarget.getXLenVT();
9179   unsigned XLenInBytes = Subtarget.getXLen() / 8;
9180   // Used with vargs to acumulate store chains.
9181   std::vector<SDValue> OutChains;
9182 
9183   // Assign locations to all of the incoming arguments.
9184   SmallVector<CCValAssign, 16> ArgLocs;
9185   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9186 
9187   if (CallConv == CallingConv::GHC)
9188     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9189   else
9190     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9191                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9192                                                    : CC_RISCV);
9193 
9194   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9195     CCValAssign &VA = ArgLocs[i];
9196     SDValue ArgValue;
9197     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9198     // case.
9199     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9200       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9201     else if (VA.isRegLoc())
9202       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9203     else
9204       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9205 
9206     if (VA.getLocInfo() == CCValAssign::Indirect) {
9207       // If the original argument was split and passed by reference (e.g. i128
9208       // on RV32), we need to load all parts of it here (using the same
9209       // address). Vectors may be partly split to registers and partly to the
9210       // stack, in which case the base address is partly offset and subsequent
9211       // stores are relative to that.
9212       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9213                                    MachinePointerInfo()));
9214       unsigned ArgIndex = Ins[i].OrigArgIndex;
9215       unsigned ArgPartOffset = Ins[i].PartOffset;
9216       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9217       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9218         CCValAssign &PartVA = ArgLocs[i + 1];
9219         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9220         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9221         if (PartVA.getValVT().isScalableVector())
9222           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9223         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9224         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9225                                      MachinePointerInfo()));
9226         ++i;
9227       }
9228       continue;
9229     }
9230     InVals.push_back(ArgValue);
9231   }
9232 
9233   if (IsVarArg) {
9234     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9235     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9236     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9237     MachineFrameInfo &MFI = MF.getFrameInfo();
9238     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9239     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9240 
9241     // Offset of the first variable argument from stack pointer, and size of
9242     // the vararg save area. For now, the varargs save area is either zero or
9243     // large enough to hold a0-a7.
9244     int VaArgOffset, VarArgsSaveSize;
9245 
9246     // If all registers are allocated, then all varargs must be passed on the
9247     // stack and we don't need to save any argregs.
9248     if (ArgRegs.size() == Idx) {
9249       VaArgOffset = CCInfo.getNextStackOffset();
9250       VarArgsSaveSize = 0;
9251     } else {
9252       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9253       VaArgOffset = -VarArgsSaveSize;
9254     }
9255 
9256     // Record the frame index of the first variable argument
9257     // which is a value necessary to VASTART.
9258     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9259     RVFI->setVarArgsFrameIndex(FI);
9260 
9261     // If saving an odd number of registers then create an extra stack slot to
9262     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
9263     // offsets to even-numbered registered remain 2*XLEN-aligned.
9264     if (Idx % 2) {
9265       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9266       VarArgsSaveSize += XLenInBytes;
9267     }
9268 
9269     // Copy the integer registers that may have been used for passing varargs
9270     // to the vararg save area.
9271     for (unsigned I = Idx; I < ArgRegs.size();
9272          ++I, VaArgOffset += XLenInBytes) {
9273       const Register Reg = RegInfo.createVirtualRegister(RC);
9274       RegInfo.addLiveIn(ArgRegs[I], Reg);
9275       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
9276       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9277       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9278       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
9279                                    MachinePointerInfo::getFixedStack(MF, FI));
9280       cast<StoreSDNode>(Store.getNode())
9281           ->getMemOperand()
9282           ->setValue((Value *)nullptr);
9283       OutChains.push_back(Store);
9284     }
9285     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
9286   }
9287 
9288   // All stores are grouped in one node to allow the matching between
9289   // the size of Ins and InVals. This only happens for vararg functions.
9290   if (!OutChains.empty()) {
9291     OutChains.push_back(Chain);
9292     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
9293   }
9294 
9295   return Chain;
9296 }
9297 
9298 /// isEligibleForTailCallOptimization - Check whether the call is eligible
9299 /// for tail call optimization.
9300 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9301 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9302     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9303     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9304 
9305   auto &Callee = CLI.Callee;
9306   auto CalleeCC = CLI.CallConv;
9307   auto &Outs = CLI.Outs;
9308   auto &Caller = MF.getFunction();
9309   auto CallerCC = Caller.getCallingConv();
9310 
9311   // Exception-handling functions need a special set of instructions to
9312   // indicate a return to the hardware. Tail-calling another function would
9313   // probably break this.
9314   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
9315   // should be expanded as new function attributes are introduced.
9316   if (Caller.hasFnAttribute("interrupt"))
9317     return false;
9318 
9319   // Do not tail call opt if the stack is used to pass parameters.
9320   if (CCInfo.getNextStackOffset() != 0)
9321     return false;
9322 
9323   // Do not tail call opt if any parameters need to be passed indirectly.
9324   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
9325   // passed indirectly. So the address of the value will be passed in a
9326   // register, or if not available, then the address is put on the stack. In
9327   // order to pass indirectly, space on the stack often needs to be allocated
9328   // in order to store the value. In this case the CCInfo.getNextStackOffset()
9329   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
9330   // are passed CCValAssign::Indirect.
9331   for (auto &VA : ArgLocs)
9332     if (VA.getLocInfo() == CCValAssign::Indirect)
9333       return false;
9334 
9335   // Do not tail call opt if either caller or callee uses struct return
9336   // semantics.
9337   auto IsCallerStructRet = Caller.hasStructRetAttr();
9338   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9339   if (IsCallerStructRet || IsCalleeStructRet)
9340     return false;
9341 
9342   // Externally-defined functions with weak linkage should not be
9343   // tail-called. The behaviour of branch instructions in this situation (as
9344   // used for tail calls) is implementation-defined, so we cannot rely on the
9345   // linker replacing the tail call with a return.
9346   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
9347     const GlobalValue *GV = G->getGlobal();
9348     if (GV->hasExternalWeakLinkage())
9349       return false;
9350   }
9351 
9352   // The callee has to preserve all registers the caller needs to preserve.
9353   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9354   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9355   if (CalleeCC != CallerCC) {
9356     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9357     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9358       return false;
9359   }
9360 
9361   // Byval parameters hand the function a pointer directly into the stack area
9362   // we want to reuse during a tail call. Working around this *is* possible
9363   // but less efficient and uglier in LowerCall.
9364   for (auto &Arg : Outs)
9365     if (Arg.Flags.isByVal())
9366       return false;
9367 
9368   return true;
9369 }
9370 
9371 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
9372   return DAG.getDataLayout().getPrefTypeAlign(
9373       VT.getTypeForEVT(*DAG.getContext()));
9374 }
9375 
9376 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
9377 // and output parameter nodes.
9378 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
9379                                        SmallVectorImpl<SDValue> &InVals) const {
9380   SelectionDAG &DAG = CLI.DAG;
9381   SDLoc &DL = CLI.DL;
9382   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
9383   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
9384   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
9385   SDValue Chain = CLI.Chain;
9386   SDValue Callee = CLI.Callee;
9387   bool &IsTailCall = CLI.IsTailCall;
9388   CallingConv::ID CallConv = CLI.CallConv;
9389   bool IsVarArg = CLI.IsVarArg;
9390   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9391   MVT XLenVT = Subtarget.getXLenVT();
9392 
9393   MachineFunction &MF = DAG.getMachineFunction();
9394 
9395   // Analyze the operands of the call, assigning locations to each operand.
9396   SmallVector<CCValAssign, 16> ArgLocs;
9397   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9398 
9399   if (CallConv == CallingConv::GHC)
9400     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
9401   else
9402     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
9403                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9404                                                     : CC_RISCV);
9405 
9406   // Check if it's really possible to do a tail call.
9407   if (IsTailCall)
9408     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9409 
9410   if (IsTailCall)
9411     ++NumTailCalls;
9412   else if (CLI.CB && CLI.CB->isMustTailCall())
9413     report_fatal_error("failed to perform tail call elimination on a call "
9414                        "site marked musttail");
9415 
9416   // Get a count of how many bytes are to be pushed on the stack.
9417   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
9418 
9419   // Create local copies for byval args
9420   SmallVector<SDValue, 8> ByValArgs;
9421   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9422     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9423     if (!Flags.isByVal())
9424       continue;
9425 
9426     SDValue Arg = OutVals[i];
9427     unsigned Size = Flags.getByValSize();
9428     Align Alignment = Flags.getNonZeroByValAlign();
9429 
9430     int FI =
9431         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
9432     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9433     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
9434 
9435     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9436                           /*IsVolatile=*/false,
9437                           /*AlwaysInline=*/false, IsTailCall,
9438                           MachinePointerInfo(), MachinePointerInfo());
9439     ByValArgs.push_back(FIPtr);
9440   }
9441 
9442   if (!IsTailCall)
9443     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
9444 
9445   // Copy argument values to their designated locations.
9446   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
9447   SmallVector<SDValue, 8> MemOpChains;
9448   SDValue StackPtr;
9449   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
9450     CCValAssign &VA = ArgLocs[i];
9451     SDValue ArgValue = OutVals[i];
9452     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9453 
9454     // Handle passing f64 on RV32D with a soft float ABI as a special case.
9455     bool IsF64OnRV32DSoftABI =
9456         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
9457     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
9458       SDValue SplitF64 = DAG.getNode(
9459           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
9460       SDValue Lo = SplitF64.getValue(0);
9461       SDValue Hi = SplitF64.getValue(1);
9462 
9463       Register RegLo = VA.getLocReg();
9464       RegsToPass.push_back(std::make_pair(RegLo, Lo));
9465 
9466       if (RegLo == RISCV::X17) {
9467         // Second half of f64 is passed on the stack.
9468         // Work out the address of the stack slot.
9469         if (!StackPtr.getNode())
9470           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9471         // Emit the store.
9472         MemOpChains.push_back(
9473             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
9474       } else {
9475         // Second half of f64 is passed in another GPR.
9476         assert(RegLo < RISCV::X31 && "Invalid register pair");
9477         Register RegHigh = RegLo + 1;
9478         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
9479       }
9480       continue;
9481     }
9482 
9483     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
9484     // as any other MemLoc.
9485 
9486     // Promote the value if needed.
9487     // For now, only handle fully promoted and indirect arguments.
9488     if (VA.getLocInfo() == CCValAssign::Indirect) {
9489       // Store the argument in a stack slot and pass its address.
9490       Align StackAlign =
9491           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
9492                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
9493       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
9494       // If the original argument was split (e.g. i128), we need
9495       // to store the required parts of it here (and pass just one address).
9496       // Vectors may be partly split to registers and partly to the stack, in
9497       // which case the base address is partly offset and subsequent stores are
9498       // relative to that.
9499       unsigned ArgIndex = Outs[i].OrigArgIndex;
9500       unsigned ArgPartOffset = Outs[i].PartOffset;
9501       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9502       // Calculate the total size to store. We don't have access to what we're
9503       // actually storing other than performing the loop and collecting the
9504       // info.
9505       SmallVector<std::pair<SDValue, SDValue>> Parts;
9506       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
9507         SDValue PartValue = OutVals[i + 1];
9508         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
9509         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9510         EVT PartVT = PartValue.getValueType();
9511         if (PartVT.isScalableVector())
9512           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9513         StoredSize += PartVT.getStoreSize();
9514         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
9515         Parts.push_back(std::make_pair(PartValue, Offset));
9516         ++i;
9517       }
9518       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
9519       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
9520       MemOpChains.push_back(
9521           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
9522                        MachinePointerInfo::getFixedStack(MF, FI)));
9523       for (const auto &Part : Parts) {
9524         SDValue PartValue = Part.first;
9525         SDValue PartOffset = Part.second;
9526         SDValue Address =
9527             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
9528         MemOpChains.push_back(
9529             DAG.getStore(Chain, DL, PartValue, Address,
9530                          MachinePointerInfo::getFixedStack(MF, FI)));
9531       }
9532       ArgValue = SpillSlot;
9533     } else {
9534       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
9535     }
9536 
9537     // Use local copy if it is a byval arg.
9538     if (Flags.isByVal())
9539       ArgValue = ByValArgs[j++];
9540 
9541     if (VA.isRegLoc()) {
9542       // Queue up the argument copies and emit them at the end.
9543       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
9544     } else {
9545       assert(VA.isMemLoc() && "Argument not register or memory");
9546       assert(!IsTailCall && "Tail call not allowed if stack is used "
9547                             "for passing parameters");
9548 
9549       // Work out the address of the stack slot.
9550       if (!StackPtr.getNode())
9551         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9552       SDValue Address =
9553           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
9554                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
9555 
9556       // Emit the store.
9557       MemOpChains.push_back(
9558           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
9559     }
9560   }
9561 
9562   // Join the stores, which are independent of one another.
9563   if (!MemOpChains.empty())
9564     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
9565 
9566   SDValue Glue;
9567 
9568   // Build a sequence of copy-to-reg nodes, chained and glued together.
9569   for (auto &Reg : RegsToPass) {
9570     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
9571     Glue = Chain.getValue(1);
9572   }
9573 
9574   // Validate that none of the argument registers have been marked as
9575   // reserved, if so report an error. Do the same for the return address if this
9576   // is not a tailcall.
9577   validateCCReservedRegs(RegsToPass, MF);
9578   if (!IsTailCall &&
9579       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
9580     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9581         MF.getFunction(),
9582         "Return address register required, but has been reserved."});
9583 
9584   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
9585   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
9586   // split it and then direct call can be matched by PseudoCALL.
9587   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
9588     const GlobalValue *GV = S->getGlobal();
9589 
9590     unsigned OpFlags = RISCVII::MO_CALL;
9591     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
9592       OpFlags = RISCVII::MO_PLT;
9593 
9594     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
9595   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
9596     unsigned OpFlags = RISCVII::MO_CALL;
9597 
9598     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
9599                                                  nullptr))
9600       OpFlags = RISCVII::MO_PLT;
9601 
9602     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
9603   }
9604 
9605   // The first call operand is the chain and the second is the target address.
9606   SmallVector<SDValue, 8> Ops;
9607   Ops.push_back(Chain);
9608   Ops.push_back(Callee);
9609 
9610   // Add argument registers to the end of the list so that they are
9611   // known live into the call.
9612   for (auto &Reg : RegsToPass)
9613     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
9614 
9615   if (!IsTailCall) {
9616     // Add a register mask operand representing the call-preserved registers.
9617     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
9618     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
9619     assert(Mask && "Missing call preserved mask for calling convention");
9620     Ops.push_back(DAG.getRegisterMask(Mask));
9621   }
9622 
9623   // Glue the call to the argument copies, if any.
9624   if (Glue.getNode())
9625     Ops.push_back(Glue);
9626 
9627   // Emit the call.
9628   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9629 
9630   if (IsTailCall) {
9631     MF.getFrameInfo().setHasTailCall();
9632     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
9633   }
9634 
9635   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
9636   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
9637   Glue = Chain.getValue(1);
9638 
9639   // Mark the end of the call, which is glued to the call itself.
9640   Chain = DAG.getCALLSEQ_END(Chain,
9641                              DAG.getConstant(NumBytes, DL, PtrVT, true),
9642                              DAG.getConstant(0, DL, PtrVT, true),
9643                              Glue, DL);
9644   Glue = Chain.getValue(1);
9645 
9646   // Assign locations to each value returned by this call.
9647   SmallVector<CCValAssign, 16> RVLocs;
9648   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
9649   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
9650 
9651   // Copy all of the result registers out of their specified physreg.
9652   for (auto &VA : RVLocs) {
9653     // Copy the value out
9654     SDValue RetValue =
9655         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
9656     // Glue the RetValue to the end of the call sequence
9657     Chain = RetValue.getValue(1);
9658     Glue = RetValue.getValue(2);
9659 
9660     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9661       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
9662       SDValue RetValue2 =
9663           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
9664       Chain = RetValue2.getValue(1);
9665       Glue = RetValue2.getValue(2);
9666       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
9667                              RetValue2);
9668     }
9669 
9670     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
9671 
9672     InVals.push_back(RetValue);
9673   }
9674 
9675   return Chain;
9676 }
9677 
9678 bool RISCVTargetLowering::CanLowerReturn(
9679     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
9680     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
9681   SmallVector<CCValAssign, 16> RVLocs;
9682   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9683 
9684   Optional<unsigned> FirstMaskArgument;
9685   if (Subtarget.hasVInstructions())
9686     FirstMaskArgument = preAssignMask(Outs);
9687 
9688   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9689     MVT VT = Outs[i].VT;
9690     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9691     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9692     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
9693                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
9694                  *this, FirstMaskArgument))
9695       return false;
9696   }
9697   return true;
9698 }
9699 
9700 SDValue
9701 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9702                                  bool IsVarArg,
9703                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
9704                                  const SmallVectorImpl<SDValue> &OutVals,
9705                                  const SDLoc &DL, SelectionDAG &DAG) const {
9706   const MachineFunction &MF = DAG.getMachineFunction();
9707   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9708 
9709   // Stores the assignment of the return value to a location.
9710   SmallVector<CCValAssign, 16> RVLocs;
9711 
9712   // Info about the registers and stack slot.
9713   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9714                  *DAG.getContext());
9715 
9716   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9717                     nullptr, CC_RISCV);
9718 
9719   if (CallConv == CallingConv::GHC && !RVLocs.empty())
9720     report_fatal_error("GHC functions return void only");
9721 
9722   SDValue Glue;
9723   SmallVector<SDValue, 4> RetOps(1, Chain);
9724 
9725   // Copy the result values into the output registers.
9726   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9727     SDValue Val = OutVals[i];
9728     CCValAssign &VA = RVLocs[i];
9729     assert(VA.isRegLoc() && "Can only return in registers!");
9730 
9731     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9732       // Handle returning f64 on RV32D with a soft float ABI.
9733       assert(VA.isRegLoc() && "Expected return via registers");
9734       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9735                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
9736       SDValue Lo = SplitF64.getValue(0);
9737       SDValue Hi = SplitF64.getValue(1);
9738       Register RegLo = VA.getLocReg();
9739       assert(RegLo < RISCV::X31 && "Invalid register pair");
9740       Register RegHi = RegLo + 1;
9741 
9742       if (STI.isRegisterReservedByUser(RegLo) ||
9743           STI.isRegisterReservedByUser(RegHi))
9744         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9745             MF.getFunction(),
9746             "Return value register required, but has been reserved."});
9747 
9748       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
9749       Glue = Chain.getValue(1);
9750       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
9751       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
9752       Glue = Chain.getValue(1);
9753       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
9754     } else {
9755       // Handle a 'normal' return.
9756       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
9757       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
9758 
9759       if (STI.isRegisterReservedByUser(VA.getLocReg()))
9760         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9761             MF.getFunction(),
9762             "Return value register required, but has been reserved."});
9763 
9764       // Guarantee that all emitted copies are stuck together.
9765       Glue = Chain.getValue(1);
9766       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
9767     }
9768   }
9769 
9770   RetOps[0] = Chain; // Update chain.
9771 
9772   // Add the glue node if we have it.
9773   if (Glue.getNode()) {
9774     RetOps.push_back(Glue);
9775   }
9776 
9777   unsigned RetOpc = RISCVISD::RET_FLAG;
9778   // Interrupt service routines use different return instructions.
9779   const Function &Func = DAG.getMachineFunction().getFunction();
9780   if (Func.hasFnAttribute("interrupt")) {
9781     if (!Func.getReturnType()->isVoidTy())
9782       report_fatal_error(
9783           "Functions with the interrupt attribute must have void return type!");
9784 
9785     MachineFunction &MF = DAG.getMachineFunction();
9786     StringRef Kind =
9787       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9788 
9789     if (Kind == "user")
9790       RetOpc = RISCVISD::URET_FLAG;
9791     else if (Kind == "supervisor")
9792       RetOpc = RISCVISD::SRET_FLAG;
9793     else
9794       RetOpc = RISCVISD::MRET_FLAG;
9795   }
9796 
9797   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
9798 }
9799 
9800 void RISCVTargetLowering::validateCCReservedRegs(
9801     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
9802     MachineFunction &MF) const {
9803   const Function &F = MF.getFunction();
9804   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9805 
9806   if (llvm::any_of(Regs, [&STI](auto Reg) {
9807         return STI.isRegisterReservedByUser(Reg.first);
9808       }))
9809     F.getContext().diagnose(DiagnosticInfoUnsupported{
9810         F, "Argument register required, but has been reserved."});
9811 }
9812 
9813 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
9814   return CI->isTailCall();
9815 }
9816 
9817 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
9818 #define NODE_NAME_CASE(NODE)                                                   \
9819   case RISCVISD::NODE:                                                         \
9820     return "RISCVISD::" #NODE;
9821   // clang-format off
9822   switch ((RISCVISD::NodeType)Opcode) {
9823   case RISCVISD::FIRST_NUMBER:
9824     break;
9825   NODE_NAME_CASE(RET_FLAG)
9826   NODE_NAME_CASE(URET_FLAG)
9827   NODE_NAME_CASE(SRET_FLAG)
9828   NODE_NAME_CASE(MRET_FLAG)
9829   NODE_NAME_CASE(CALL)
9830   NODE_NAME_CASE(SELECT_CC)
9831   NODE_NAME_CASE(BR_CC)
9832   NODE_NAME_CASE(BuildPairF64)
9833   NODE_NAME_CASE(SplitF64)
9834   NODE_NAME_CASE(TAIL)
9835   NODE_NAME_CASE(MULHSU)
9836   NODE_NAME_CASE(SLLW)
9837   NODE_NAME_CASE(SRAW)
9838   NODE_NAME_CASE(SRLW)
9839   NODE_NAME_CASE(DIVW)
9840   NODE_NAME_CASE(DIVUW)
9841   NODE_NAME_CASE(REMUW)
9842   NODE_NAME_CASE(ROLW)
9843   NODE_NAME_CASE(RORW)
9844   NODE_NAME_CASE(CLZW)
9845   NODE_NAME_CASE(CTZW)
9846   NODE_NAME_CASE(FSLW)
9847   NODE_NAME_CASE(FSRW)
9848   NODE_NAME_CASE(FSL)
9849   NODE_NAME_CASE(FSR)
9850   NODE_NAME_CASE(FMV_H_X)
9851   NODE_NAME_CASE(FMV_X_ANYEXTH)
9852   NODE_NAME_CASE(FMV_W_X_RV64)
9853   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
9854   NODE_NAME_CASE(FCVT_X)
9855   NODE_NAME_CASE(FCVT_XU)
9856   NODE_NAME_CASE(FCVT_W_RV64)
9857   NODE_NAME_CASE(FCVT_WU_RV64)
9858   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
9859   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
9860   NODE_NAME_CASE(READ_CYCLE_WIDE)
9861   NODE_NAME_CASE(GREV)
9862   NODE_NAME_CASE(GREVW)
9863   NODE_NAME_CASE(GORC)
9864   NODE_NAME_CASE(GORCW)
9865   NODE_NAME_CASE(SHFL)
9866   NODE_NAME_CASE(SHFLW)
9867   NODE_NAME_CASE(UNSHFL)
9868   NODE_NAME_CASE(UNSHFLW)
9869   NODE_NAME_CASE(BFP)
9870   NODE_NAME_CASE(BFPW)
9871   NODE_NAME_CASE(BCOMPRESS)
9872   NODE_NAME_CASE(BCOMPRESSW)
9873   NODE_NAME_CASE(BDECOMPRESS)
9874   NODE_NAME_CASE(BDECOMPRESSW)
9875   NODE_NAME_CASE(VMV_V_X_VL)
9876   NODE_NAME_CASE(VFMV_V_F_VL)
9877   NODE_NAME_CASE(VMV_X_S)
9878   NODE_NAME_CASE(VMV_S_X_VL)
9879   NODE_NAME_CASE(VFMV_S_F_VL)
9880   NODE_NAME_CASE(SPLAT_VECTOR_I64)
9881   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
9882   NODE_NAME_CASE(READ_VLENB)
9883   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
9884   NODE_NAME_CASE(VSLIDEUP_VL)
9885   NODE_NAME_CASE(VSLIDE1UP_VL)
9886   NODE_NAME_CASE(VSLIDEDOWN_VL)
9887   NODE_NAME_CASE(VSLIDE1DOWN_VL)
9888   NODE_NAME_CASE(VID_VL)
9889   NODE_NAME_CASE(VFNCVT_ROD_VL)
9890   NODE_NAME_CASE(VECREDUCE_ADD_VL)
9891   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
9892   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
9893   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
9894   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
9895   NODE_NAME_CASE(VECREDUCE_AND_VL)
9896   NODE_NAME_CASE(VECREDUCE_OR_VL)
9897   NODE_NAME_CASE(VECREDUCE_XOR_VL)
9898   NODE_NAME_CASE(VECREDUCE_FADD_VL)
9899   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
9900   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
9901   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
9902   NODE_NAME_CASE(ADD_VL)
9903   NODE_NAME_CASE(AND_VL)
9904   NODE_NAME_CASE(MUL_VL)
9905   NODE_NAME_CASE(OR_VL)
9906   NODE_NAME_CASE(SDIV_VL)
9907   NODE_NAME_CASE(SHL_VL)
9908   NODE_NAME_CASE(SREM_VL)
9909   NODE_NAME_CASE(SRA_VL)
9910   NODE_NAME_CASE(SRL_VL)
9911   NODE_NAME_CASE(SUB_VL)
9912   NODE_NAME_CASE(UDIV_VL)
9913   NODE_NAME_CASE(UREM_VL)
9914   NODE_NAME_CASE(XOR_VL)
9915   NODE_NAME_CASE(SADDSAT_VL)
9916   NODE_NAME_CASE(UADDSAT_VL)
9917   NODE_NAME_CASE(SSUBSAT_VL)
9918   NODE_NAME_CASE(USUBSAT_VL)
9919   NODE_NAME_CASE(FADD_VL)
9920   NODE_NAME_CASE(FSUB_VL)
9921   NODE_NAME_CASE(FMUL_VL)
9922   NODE_NAME_CASE(FDIV_VL)
9923   NODE_NAME_CASE(FNEG_VL)
9924   NODE_NAME_CASE(FABS_VL)
9925   NODE_NAME_CASE(FSQRT_VL)
9926   NODE_NAME_CASE(FMA_VL)
9927   NODE_NAME_CASE(FCOPYSIGN_VL)
9928   NODE_NAME_CASE(SMIN_VL)
9929   NODE_NAME_CASE(SMAX_VL)
9930   NODE_NAME_CASE(UMIN_VL)
9931   NODE_NAME_CASE(UMAX_VL)
9932   NODE_NAME_CASE(FMINNUM_VL)
9933   NODE_NAME_CASE(FMAXNUM_VL)
9934   NODE_NAME_CASE(MULHS_VL)
9935   NODE_NAME_CASE(MULHU_VL)
9936   NODE_NAME_CASE(FP_TO_SINT_VL)
9937   NODE_NAME_CASE(FP_TO_UINT_VL)
9938   NODE_NAME_CASE(SINT_TO_FP_VL)
9939   NODE_NAME_CASE(UINT_TO_FP_VL)
9940   NODE_NAME_CASE(FP_EXTEND_VL)
9941   NODE_NAME_CASE(FP_ROUND_VL)
9942   NODE_NAME_CASE(VWMUL_VL)
9943   NODE_NAME_CASE(VWMULU_VL)
9944   NODE_NAME_CASE(SETCC_VL)
9945   NODE_NAME_CASE(VSELECT_VL)
9946   NODE_NAME_CASE(VMAND_VL)
9947   NODE_NAME_CASE(VMOR_VL)
9948   NODE_NAME_CASE(VMXOR_VL)
9949   NODE_NAME_CASE(VMCLR_VL)
9950   NODE_NAME_CASE(VMSET_VL)
9951   NODE_NAME_CASE(VRGATHER_VX_VL)
9952   NODE_NAME_CASE(VRGATHER_VV_VL)
9953   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
9954   NODE_NAME_CASE(VSEXT_VL)
9955   NODE_NAME_CASE(VZEXT_VL)
9956   NODE_NAME_CASE(VCPOP_VL)
9957   NODE_NAME_CASE(VLE_VL)
9958   NODE_NAME_CASE(VSE_VL)
9959   NODE_NAME_CASE(READ_CSR)
9960   NODE_NAME_CASE(WRITE_CSR)
9961   NODE_NAME_CASE(SWAP_CSR)
9962   }
9963   // clang-format on
9964   return nullptr;
9965 #undef NODE_NAME_CASE
9966 }
9967 
9968 /// getConstraintType - Given a constraint letter, return the type of
9969 /// constraint it is for this target.
9970 RISCVTargetLowering::ConstraintType
9971 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
9972   if (Constraint.size() == 1) {
9973     switch (Constraint[0]) {
9974     default:
9975       break;
9976     case 'f':
9977       return C_RegisterClass;
9978     case 'I':
9979     case 'J':
9980     case 'K':
9981       return C_Immediate;
9982     case 'A':
9983       return C_Memory;
9984     case 'S': // A symbolic address
9985       return C_Other;
9986     }
9987   } else {
9988     if (Constraint == "vr" || Constraint == "vm")
9989       return C_RegisterClass;
9990   }
9991   return TargetLowering::getConstraintType(Constraint);
9992 }
9993 
9994 std::pair<unsigned, const TargetRegisterClass *>
9995 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9996                                                   StringRef Constraint,
9997                                                   MVT VT) const {
9998   // First, see if this is a constraint that directly corresponds to a
9999   // RISCV register class.
10000   if (Constraint.size() == 1) {
10001     switch (Constraint[0]) {
10002     case 'r':
10003       // TODO: Support fixed vectors up to XLen for P extension?
10004       if (VT.isVector())
10005         break;
10006       return std::make_pair(0U, &RISCV::GPRRegClass);
10007     case 'f':
10008       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10009         return std::make_pair(0U, &RISCV::FPR16RegClass);
10010       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10011         return std::make_pair(0U, &RISCV::FPR32RegClass);
10012       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10013         return std::make_pair(0U, &RISCV::FPR64RegClass);
10014       break;
10015     default:
10016       break;
10017     }
10018   } else if (Constraint == "vr") {
10019     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10020                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10021       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10022         return std::make_pair(0U, RC);
10023     }
10024   } else if (Constraint == "vm") {
10025     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10026       return std::make_pair(0U, &RISCV::VMV0RegClass);
10027   }
10028 
10029   // Clang will correctly decode the usage of register name aliases into their
10030   // official names. However, other frontends like `rustc` do not. This allows
10031   // users of these frontends to use the ABI names for registers in LLVM-style
10032   // register constraints.
10033   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10034                                .Case("{zero}", RISCV::X0)
10035                                .Case("{ra}", RISCV::X1)
10036                                .Case("{sp}", RISCV::X2)
10037                                .Case("{gp}", RISCV::X3)
10038                                .Case("{tp}", RISCV::X4)
10039                                .Case("{t0}", RISCV::X5)
10040                                .Case("{t1}", RISCV::X6)
10041                                .Case("{t2}", RISCV::X7)
10042                                .Cases("{s0}", "{fp}", RISCV::X8)
10043                                .Case("{s1}", RISCV::X9)
10044                                .Case("{a0}", RISCV::X10)
10045                                .Case("{a1}", RISCV::X11)
10046                                .Case("{a2}", RISCV::X12)
10047                                .Case("{a3}", RISCV::X13)
10048                                .Case("{a4}", RISCV::X14)
10049                                .Case("{a5}", RISCV::X15)
10050                                .Case("{a6}", RISCV::X16)
10051                                .Case("{a7}", RISCV::X17)
10052                                .Case("{s2}", RISCV::X18)
10053                                .Case("{s3}", RISCV::X19)
10054                                .Case("{s4}", RISCV::X20)
10055                                .Case("{s5}", RISCV::X21)
10056                                .Case("{s6}", RISCV::X22)
10057                                .Case("{s7}", RISCV::X23)
10058                                .Case("{s8}", RISCV::X24)
10059                                .Case("{s9}", RISCV::X25)
10060                                .Case("{s10}", RISCV::X26)
10061                                .Case("{s11}", RISCV::X27)
10062                                .Case("{t3}", RISCV::X28)
10063                                .Case("{t4}", RISCV::X29)
10064                                .Case("{t5}", RISCV::X30)
10065                                .Case("{t6}", RISCV::X31)
10066                                .Default(RISCV::NoRegister);
10067   if (XRegFromAlias != RISCV::NoRegister)
10068     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10069 
10070   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10071   // TableGen record rather than the AsmName to choose registers for InlineAsm
10072   // constraints, plus we want to match those names to the widest floating point
10073   // register type available, manually select floating point registers here.
10074   //
10075   // The second case is the ABI name of the register, so that frontends can also
10076   // use the ABI names in register constraint lists.
10077   if (Subtarget.hasStdExtF()) {
10078     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10079                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10080                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10081                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10082                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10083                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10084                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10085                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10086                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10087                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10088                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10089                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10090                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10091                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10092                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10093                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10094                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10095                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10096                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10097                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10098                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10099                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10100                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10101                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10102                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10103                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10104                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10105                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10106                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10107                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10108                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10109                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10110                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10111                         .Default(RISCV::NoRegister);
10112     if (FReg != RISCV::NoRegister) {
10113       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10114       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10115         unsigned RegNo = FReg - RISCV::F0_F;
10116         unsigned DReg = RISCV::F0_D + RegNo;
10117         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10118       }
10119       if (VT == MVT::f32 || VT == MVT::Other)
10120         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10121       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10122         unsigned RegNo = FReg - RISCV::F0_F;
10123         unsigned HReg = RISCV::F0_H + RegNo;
10124         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10125       }
10126     }
10127   }
10128 
10129   if (Subtarget.hasVInstructions()) {
10130     Register VReg = StringSwitch<Register>(Constraint.lower())
10131                         .Case("{v0}", RISCV::V0)
10132                         .Case("{v1}", RISCV::V1)
10133                         .Case("{v2}", RISCV::V2)
10134                         .Case("{v3}", RISCV::V3)
10135                         .Case("{v4}", RISCV::V4)
10136                         .Case("{v5}", RISCV::V5)
10137                         .Case("{v6}", RISCV::V6)
10138                         .Case("{v7}", RISCV::V7)
10139                         .Case("{v8}", RISCV::V8)
10140                         .Case("{v9}", RISCV::V9)
10141                         .Case("{v10}", RISCV::V10)
10142                         .Case("{v11}", RISCV::V11)
10143                         .Case("{v12}", RISCV::V12)
10144                         .Case("{v13}", RISCV::V13)
10145                         .Case("{v14}", RISCV::V14)
10146                         .Case("{v15}", RISCV::V15)
10147                         .Case("{v16}", RISCV::V16)
10148                         .Case("{v17}", RISCV::V17)
10149                         .Case("{v18}", RISCV::V18)
10150                         .Case("{v19}", RISCV::V19)
10151                         .Case("{v20}", RISCV::V20)
10152                         .Case("{v21}", RISCV::V21)
10153                         .Case("{v22}", RISCV::V22)
10154                         .Case("{v23}", RISCV::V23)
10155                         .Case("{v24}", RISCV::V24)
10156                         .Case("{v25}", RISCV::V25)
10157                         .Case("{v26}", RISCV::V26)
10158                         .Case("{v27}", RISCV::V27)
10159                         .Case("{v28}", RISCV::V28)
10160                         .Case("{v29}", RISCV::V29)
10161                         .Case("{v30}", RISCV::V30)
10162                         .Case("{v31}", RISCV::V31)
10163                         .Default(RISCV::NoRegister);
10164     if (VReg != RISCV::NoRegister) {
10165       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10166         return std::make_pair(VReg, &RISCV::VMRegClass);
10167       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10168         return std::make_pair(VReg, &RISCV::VRRegClass);
10169       for (const auto *RC :
10170            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10171         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10172           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10173           return std::make_pair(VReg, RC);
10174         }
10175       }
10176     }
10177   }
10178 
10179   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10180 }
10181 
10182 unsigned
10183 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10184   // Currently only support length 1 constraints.
10185   if (ConstraintCode.size() == 1) {
10186     switch (ConstraintCode[0]) {
10187     case 'A':
10188       return InlineAsm::Constraint_A;
10189     default:
10190       break;
10191     }
10192   }
10193 
10194   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10195 }
10196 
10197 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10198     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10199     SelectionDAG &DAG) const {
10200   // Currently only support length 1 constraints.
10201   if (Constraint.length() == 1) {
10202     switch (Constraint[0]) {
10203     case 'I':
10204       // Validate & create a 12-bit signed immediate operand.
10205       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10206         uint64_t CVal = C->getSExtValue();
10207         if (isInt<12>(CVal))
10208           Ops.push_back(
10209               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10210       }
10211       return;
10212     case 'J':
10213       // Validate & create an integer zero operand.
10214       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10215         if (C->getZExtValue() == 0)
10216           Ops.push_back(
10217               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10218       return;
10219     case 'K':
10220       // Validate & create a 5-bit unsigned immediate operand.
10221       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10222         uint64_t CVal = C->getZExtValue();
10223         if (isUInt<5>(CVal))
10224           Ops.push_back(
10225               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10226       }
10227       return;
10228     case 'S':
10229       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10230         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10231                                                  GA->getValueType(0)));
10232       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10233         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10234                                                 BA->getValueType(0)));
10235       }
10236       return;
10237     default:
10238       break;
10239     }
10240   }
10241   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10242 }
10243 
10244 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10245                                                    Instruction *Inst,
10246                                                    AtomicOrdering Ord) const {
10247   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10248     return Builder.CreateFence(Ord);
10249   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10250     return Builder.CreateFence(AtomicOrdering::Release);
10251   return nullptr;
10252 }
10253 
10254 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10255                                                     Instruction *Inst,
10256                                                     AtomicOrdering Ord) const {
10257   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10258     return Builder.CreateFence(AtomicOrdering::Acquire);
10259   return nullptr;
10260 }
10261 
10262 TargetLowering::AtomicExpansionKind
10263 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10264   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
10265   // point operations can't be used in an lr/sc sequence without breaking the
10266   // forward-progress guarantee.
10267   if (AI->isFloatingPointOperation())
10268     return AtomicExpansionKind::CmpXChg;
10269 
10270   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
10271   if (Size == 8 || Size == 16)
10272     return AtomicExpansionKind::MaskedIntrinsic;
10273   return AtomicExpansionKind::None;
10274 }
10275 
10276 static Intrinsic::ID
10277 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
10278   if (XLen == 32) {
10279     switch (BinOp) {
10280     default:
10281       llvm_unreachable("Unexpected AtomicRMW BinOp");
10282     case AtomicRMWInst::Xchg:
10283       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
10284     case AtomicRMWInst::Add:
10285       return Intrinsic::riscv_masked_atomicrmw_add_i32;
10286     case AtomicRMWInst::Sub:
10287       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
10288     case AtomicRMWInst::Nand:
10289       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
10290     case AtomicRMWInst::Max:
10291       return Intrinsic::riscv_masked_atomicrmw_max_i32;
10292     case AtomicRMWInst::Min:
10293       return Intrinsic::riscv_masked_atomicrmw_min_i32;
10294     case AtomicRMWInst::UMax:
10295       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
10296     case AtomicRMWInst::UMin:
10297       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
10298     }
10299   }
10300 
10301   if (XLen == 64) {
10302     switch (BinOp) {
10303     default:
10304       llvm_unreachable("Unexpected AtomicRMW BinOp");
10305     case AtomicRMWInst::Xchg:
10306       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
10307     case AtomicRMWInst::Add:
10308       return Intrinsic::riscv_masked_atomicrmw_add_i64;
10309     case AtomicRMWInst::Sub:
10310       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
10311     case AtomicRMWInst::Nand:
10312       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
10313     case AtomicRMWInst::Max:
10314       return Intrinsic::riscv_masked_atomicrmw_max_i64;
10315     case AtomicRMWInst::Min:
10316       return Intrinsic::riscv_masked_atomicrmw_min_i64;
10317     case AtomicRMWInst::UMax:
10318       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
10319     case AtomicRMWInst::UMin:
10320       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
10321     }
10322   }
10323 
10324   llvm_unreachable("Unexpected XLen\n");
10325 }
10326 
10327 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
10328     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
10329     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
10330   unsigned XLen = Subtarget.getXLen();
10331   Value *Ordering =
10332       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
10333   Type *Tys[] = {AlignedAddr->getType()};
10334   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
10335       AI->getModule(),
10336       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
10337 
10338   if (XLen == 64) {
10339     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10340     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10341     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10342   }
10343 
10344   Value *Result;
10345 
10346   // Must pass the shift amount needed to sign extend the loaded value prior
10347   // to performing a signed comparison for min/max. ShiftAmt is the number of
10348   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
10349   // is the number of bits to left+right shift the value in order to
10350   // sign-extend.
10351   if (AI->getOperation() == AtomicRMWInst::Min ||
10352       AI->getOperation() == AtomicRMWInst::Max) {
10353     const DataLayout &DL = AI->getModule()->getDataLayout();
10354     unsigned ValWidth =
10355         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
10356     Value *SextShamt =
10357         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
10358     Result = Builder.CreateCall(LrwOpScwLoop,
10359                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10360   } else {
10361     Result =
10362         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10363   }
10364 
10365   if (XLen == 64)
10366     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10367   return Result;
10368 }
10369 
10370 TargetLowering::AtomicExpansionKind
10371 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
10372     AtomicCmpXchgInst *CI) const {
10373   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
10374   if (Size == 8 || Size == 16)
10375     return AtomicExpansionKind::MaskedIntrinsic;
10376   return AtomicExpansionKind::None;
10377 }
10378 
10379 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
10380     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
10381     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
10382   unsigned XLen = Subtarget.getXLen();
10383   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
10384   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
10385   if (XLen == 64) {
10386     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10387     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10388     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10389     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
10390   }
10391   Type *Tys[] = {AlignedAddr->getType()};
10392   Function *MaskedCmpXchg =
10393       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
10394   Value *Result = Builder.CreateCall(
10395       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
10396   if (XLen == 64)
10397     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10398   return Result;
10399 }
10400 
10401 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
10402   return false;
10403 }
10404 
10405 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
10406                                                EVT VT) const {
10407   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
10408     return false;
10409 
10410   switch (FPVT.getSimpleVT().SimpleTy) {
10411   case MVT::f16:
10412     return Subtarget.hasStdExtZfh();
10413   case MVT::f32:
10414     return Subtarget.hasStdExtF();
10415   case MVT::f64:
10416     return Subtarget.hasStdExtD();
10417   default:
10418     return false;
10419   }
10420 }
10421 
10422 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
10423   // If we are using the small code model, we can reduce size of jump table
10424   // entry to 4 bytes.
10425   if (Subtarget.is64Bit() && !isPositionIndependent() &&
10426       getTargetMachine().getCodeModel() == CodeModel::Small) {
10427     return MachineJumpTableInfo::EK_Custom32;
10428   }
10429   return TargetLowering::getJumpTableEncoding();
10430 }
10431 
10432 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
10433     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
10434     unsigned uid, MCContext &Ctx) const {
10435   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
10436          getTargetMachine().getCodeModel() == CodeModel::Small);
10437   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
10438 }
10439 
10440 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
10441                                                      EVT VT) const {
10442   VT = VT.getScalarType();
10443 
10444   if (!VT.isSimple())
10445     return false;
10446 
10447   switch (VT.getSimpleVT().SimpleTy) {
10448   case MVT::f16:
10449     return Subtarget.hasStdExtZfh();
10450   case MVT::f32:
10451     return Subtarget.hasStdExtF();
10452   case MVT::f64:
10453     return Subtarget.hasStdExtD();
10454   default:
10455     break;
10456   }
10457 
10458   return false;
10459 }
10460 
10461 Register RISCVTargetLowering::getExceptionPointerRegister(
10462     const Constant *PersonalityFn) const {
10463   return RISCV::X10;
10464 }
10465 
10466 Register RISCVTargetLowering::getExceptionSelectorRegister(
10467     const Constant *PersonalityFn) const {
10468   return RISCV::X11;
10469 }
10470 
10471 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
10472   // Return false to suppress the unnecessary extensions if the LibCall
10473   // arguments or return value is f32 type for LP64 ABI.
10474   RISCVABI::ABI ABI = Subtarget.getTargetABI();
10475   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
10476     return false;
10477 
10478   return true;
10479 }
10480 
10481 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
10482   if (Subtarget.is64Bit() && Type == MVT::i32)
10483     return true;
10484 
10485   return IsSigned;
10486 }
10487 
10488 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
10489                                                  SDValue C) const {
10490   // Check integral scalar types.
10491   if (VT.isScalarInteger()) {
10492     // Omit the optimization if the sub target has the M extension and the data
10493     // size exceeds XLen.
10494     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
10495       return false;
10496     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
10497       // Break the MUL to a SLLI and an ADD/SUB.
10498       const APInt &Imm = ConstNode->getAPIntValue();
10499       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
10500           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
10501         return true;
10502       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
10503       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
10504           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
10505            (Imm - 8).isPowerOf2()))
10506         return true;
10507       // Omit the following optimization if the sub target has the M extension
10508       // and the data size >= XLen.
10509       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
10510         return false;
10511       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
10512       // a pair of LUI/ADDI.
10513       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
10514         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
10515         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
10516             (1 - ImmS).isPowerOf2())
10517         return true;
10518       }
10519     }
10520   }
10521 
10522   return false;
10523 }
10524 
10525 bool RISCVTargetLowering::isMulAddWithConstProfitable(
10526     const SDValue &AddNode, const SDValue &ConstNode) const {
10527   // Let the DAGCombiner decide for vectors.
10528   EVT VT = AddNode.getValueType();
10529   if (VT.isVector())
10530     return true;
10531 
10532   // Let the DAGCombiner decide for larger types.
10533   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
10534     return true;
10535 
10536   // It is worse if c1 is simm12 while c1*c2 is not.
10537   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
10538   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
10539   const APInt &C1 = C1Node->getAPIntValue();
10540   const APInt &C2 = C2Node->getAPIntValue();
10541   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
10542     return false;
10543 
10544   // Default to true and let the DAGCombiner decide.
10545   return true;
10546 }
10547 
10548 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
10549     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
10550     bool *Fast) const {
10551   if (!VT.isVector())
10552     return false;
10553 
10554   EVT ElemVT = VT.getVectorElementType();
10555   if (Alignment >= ElemVT.getStoreSize()) {
10556     if (Fast)
10557       *Fast = true;
10558     return true;
10559   }
10560 
10561   return false;
10562 }
10563 
10564 bool RISCVTargetLowering::splitValueIntoRegisterParts(
10565     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
10566     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
10567   bool IsABIRegCopy = CC.hasValue();
10568   EVT ValueVT = Val.getValueType();
10569   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10570     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
10571     // and cast to f32.
10572     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
10573     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
10574     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
10575                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
10576     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
10577     Parts[0] = Val;
10578     return true;
10579   }
10580 
10581   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10582     LLVMContext &Context = *DAG.getContext();
10583     EVT ValueEltVT = ValueVT.getVectorElementType();
10584     EVT PartEltVT = PartVT.getVectorElementType();
10585     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10586     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10587     if (PartVTBitSize % ValueVTBitSize == 0) {
10588       assert(PartVTBitSize >= ValueVTBitSize);
10589       // If the element types are different, bitcast to the same element type of
10590       // PartVT first.
10591       // Give an example here, we want copy a <vscale x 1 x i8> value to
10592       // <vscale x 4 x i16>.
10593       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
10594       // subvector, then we can bitcast to <vscale x 4 x i16>.
10595       if (ValueEltVT != PartEltVT) {
10596         if (PartVTBitSize > ValueVTBitSize) {
10597           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10598           assert(Count != 0 && "The number of element should not be zero.");
10599           EVT SameEltTypeVT =
10600               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10601           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
10602                             DAG.getUNDEF(SameEltTypeVT), Val,
10603                             DAG.getVectorIdxConstant(0, DL));
10604         }
10605         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
10606       } else {
10607         Val =
10608             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
10609                         Val, DAG.getVectorIdxConstant(0, DL));
10610       }
10611       Parts[0] = Val;
10612       return true;
10613     }
10614   }
10615   return false;
10616 }
10617 
10618 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
10619     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
10620     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
10621   bool IsABIRegCopy = CC.hasValue();
10622   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10623     SDValue Val = Parts[0];
10624 
10625     // Cast the f32 to i32, truncate to i16, and cast back to f16.
10626     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
10627     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
10628     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
10629     return Val;
10630   }
10631 
10632   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10633     LLVMContext &Context = *DAG.getContext();
10634     SDValue Val = Parts[0];
10635     EVT ValueEltVT = ValueVT.getVectorElementType();
10636     EVT PartEltVT = PartVT.getVectorElementType();
10637     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10638     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10639     if (PartVTBitSize % ValueVTBitSize == 0) {
10640       assert(PartVTBitSize >= ValueVTBitSize);
10641       EVT SameEltTypeVT = ValueVT;
10642       // If the element types are different, convert it to the same element type
10643       // of PartVT.
10644       // Give an example here, we want copy a <vscale x 1 x i8> value from
10645       // <vscale x 4 x i16>.
10646       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
10647       // then we can extract <vscale x 1 x i8>.
10648       if (ValueEltVT != PartEltVT) {
10649         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10650         assert(Count != 0 && "The number of element should not be zero.");
10651         SameEltTypeVT =
10652             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10653         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
10654       }
10655       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
10656                         DAG.getVectorIdxConstant(0, DL));
10657       return Val;
10658     }
10659   }
10660   return SDValue();
10661 }
10662 
10663 SDValue
10664 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
10665                                    SelectionDAG &DAG,
10666                                    SmallVectorImpl<SDNode *> &Created) const {
10667   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
10668   if (isIntDivCheap(N->getValueType(0), Attr))
10669     return SDValue(N, 0); // Lower SDIV as SDIV
10670 
10671   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
10672          "Unexpected divisor!");
10673 
10674   // Conditional move is needed, so do the transformation iff Zbt is enabled.
10675   if (!Subtarget.hasStdExtZbt())
10676     return SDValue();
10677 
10678   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
10679   // Besides, more critical path instructions will be generated when dividing
10680   // by 2. So we keep using the original DAGs for these cases.
10681   unsigned Lg2 = Divisor.countTrailingZeros();
10682   if (Lg2 == 1 || Lg2 >= 12)
10683     return SDValue();
10684 
10685   // fold (sdiv X, pow2)
10686   EVT VT = N->getValueType(0);
10687   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
10688     return SDValue();
10689 
10690   SDLoc DL(N);
10691   SDValue N0 = N->getOperand(0);
10692   SDValue Zero = DAG.getConstant(0, DL, VT);
10693   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
10694 
10695   // Add (N0 < 0) ? Pow2 - 1 : 0;
10696   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
10697   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
10698   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
10699 
10700   Created.push_back(Cmp.getNode());
10701   Created.push_back(Add.getNode());
10702   Created.push_back(Sel.getNode());
10703 
10704   // Divide by pow2.
10705   SDValue SRA =
10706       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
10707 
10708   // If we're dividing by a positive value, we're done.  Otherwise, we must
10709   // negate the result.
10710   if (Divisor.isNonNegative())
10711     return SRA;
10712 
10713   Created.push_back(SRA.getNode());
10714   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
10715 }
10716 
10717 #define GET_REGISTER_MATCHER
10718 #include "RISCVGenAsmMatcher.inc"
10719 
10720 Register
10721 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
10722                                        const MachineFunction &MF) const {
10723   Register Reg = MatchRegisterAltName(RegName);
10724   if (Reg == RISCV::NoRegister)
10725     Reg = MatchRegisterName(RegName);
10726   if (Reg == RISCV::NoRegister)
10727     report_fatal_error(
10728         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
10729   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
10730   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
10731     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
10732                              StringRef(RegName) + "\"."));
10733   return Reg;
10734 }
10735 
10736 namespace llvm {
10737 namespace RISCVVIntrinsicsTable {
10738 
10739 #define GET_RISCVVIntrinsicsTable_IMPL
10740 #include "RISCVGenSearchableTables.inc"
10741 
10742 } // namespace RISCVVIntrinsicsTable
10743 
10744 } // namespace llvm
10745