1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306   }
307 
308   if (Subtarget.hasStdExtZbt()) {
309     setOperationAction(ISD::FSHL, XLenVT, Custom);
310     setOperationAction(ISD::FSHR, XLenVT, Custom);
311     setOperationAction(ISD::SELECT, XLenVT, Legal);
312 
313     if (Subtarget.is64Bit()) {
314       setOperationAction(ISD::FSHL, MVT::i32, Custom);
315       setOperationAction(ISD::FSHR, MVT::i32, Custom);
316     }
317   } else {
318     setOperationAction(ISD::SELECT, XLenVT, Custom);
319   }
320 
321   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
322       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
323       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
324       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
325       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
326       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
327       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
328 
329   static const ISD::CondCode FPCCToExpand[] = {
330       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
331       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
332       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
333 
334   static const ISD::NodeType FPOpToExpand[] = {
335       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
336       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
337 
338   if (Subtarget.hasStdExtZfh())
339     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
340 
341   if (Subtarget.hasStdExtZfh()) {
342     for (auto NT : FPLegalNodeTypes)
343       setOperationAction(NT, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
345     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
346     for (auto CC : FPCCToExpand)
347       setCondCodeAction(CC, MVT::f16, Expand);
348     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
349     setOperationAction(ISD::SELECT, MVT::f16, Custom);
350     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
351 
352     setOperationAction(ISD::FREM,       MVT::f16, Promote);
353     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
354     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
355     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
356     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
357     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
358     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
359     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
360     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
361     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
362     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
363     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
364     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
365     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
366     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
367     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
368     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
369     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
370 
371     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
372     // complete support for all operations in LegalizeDAG.
373 
374     // We need to custom promote this.
375     if (Subtarget.is64Bit())
376       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
377   }
378 
379   if (Subtarget.hasStdExtF()) {
380     for (auto NT : FPLegalNodeTypes)
381       setOperationAction(NT, MVT::f32, Legal);
382     for (auto CC : FPCCToExpand)
383       setCondCodeAction(CC, MVT::f32, Expand);
384     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
385     setOperationAction(ISD::SELECT, MVT::f32, Custom);
386     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
387     for (auto Op : FPOpToExpand)
388       setOperationAction(Op, MVT::f32, Expand);
389     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
390     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
391   }
392 
393   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
394     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
395 
396   if (Subtarget.hasStdExtD()) {
397     for (auto NT : FPLegalNodeTypes)
398       setOperationAction(NT, MVT::f64, Legal);
399     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
400     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
401     for (auto CC : FPCCToExpand)
402       setCondCodeAction(CC, MVT::f64, Expand);
403     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
404     setOperationAction(ISD::SELECT, MVT::f64, Custom);
405     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
406     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
407     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
408     for (auto Op : FPOpToExpand)
409       setOperationAction(Op, MVT::f64, Expand);
410     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
411     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
412   }
413 
414   if (Subtarget.is64Bit()) {
415     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
416     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
417     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
418     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
419   }
420 
421   if (Subtarget.hasStdExtF()) {
422     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
423     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
424 
425     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
426     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
427     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
428     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
429 
430     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
431     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
432   }
433 
434   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
435   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
436   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
437   setOperationAction(ISD::JumpTable, XLenVT, Custom);
438 
439   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
440 
441   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
442   // Unfortunately this can't be determined just from the ISA naming string.
443   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
444                      Subtarget.is64Bit() ? Legal : Custom);
445 
446   setOperationAction(ISD::TRAP, MVT::Other, Legal);
447   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
448   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
449   if (Subtarget.is64Bit())
450     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
451 
452   if (Subtarget.hasStdExtA()) {
453     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
454     setMinCmpXchgSizeInBits(32);
455   } else {
456     setMaxAtomicSizeInBitsSupported(0);
457   }
458 
459   setBooleanContents(ZeroOrOneBooleanContent);
460 
461   if (Subtarget.hasVInstructions()) {
462     setBooleanVectorContents(ZeroOrOneBooleanContent);
463 
464     setOperationAction(ISD::VSCALE, XLenVT, Custom);
465 
466     // RVV intrinsics may have illegal operands.
467     // We also need to custom legalize vmv.x.s.
468     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
469     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
470     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
471     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
472     if (Subtarget.is64Bit()) {
473       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
474     } else {
475       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
476       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
477     }
478 
479     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
480     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
481 
482     static const unsigned IntegerVPOps[] = {
483         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
484         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
485         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
486         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
487         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
488         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
489         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
490         ISD::VP_MERGE,       ISD::VP_SELECT};
491 
492     static const unsigned FloatingPointVPOps[] = {
493         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
494         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
495         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
496         ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
497 
498     if (!Subtarget.is64Bit()) {
499       // We must custom-lower certain vXi64 operations on RV32 due to the vector
500       // element type being illegal.
501       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
502       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
503 
504       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
505       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
506       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
507       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
508       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
509       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
510       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
511       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
512 
513       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
514       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
515       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
516       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
517       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
518       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
519       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
520       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
521     }
522 
523     for (MVT VT : BoolVecVTs) {
524       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
525 
526       // Mask VTs are custom-expanded into a series of standard nodes
527       setOperationAction(ISD::TRUNCATE, VT, Custom);
528       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
529       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
530       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
531 
532       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
533       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
534 
535       setOperationAction(ISD::SELECT, VT, Custom);
536       setOperationAction(ISD::SELECT_CC, VT, Expand);
537       setOperationAction(ISD::VSELECT, VT, Expand);
538       setOperationAction(ISD::VP_MERGE, VT, Expand);
539       setOperationAction(ISD::VP_SELECT, VT, Expand);
540 
541       setOperationAction(ISD::VP_AND, VT, Custom);
542       setOperationAction(ISD::VP_OR, VT, Custom);
543       setOperationAction(ISD::VP_XOR, VT, Custom);
544 
545       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
546       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
547       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
548 
549       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
550       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
551       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
552 
553       // RVV has native int->float & float->int conversions where the
554       // element type sizes are within one power-of-two of each other. Any
555       // wider distances between type sizes have to be lowered as sequences
556       // which progressively narrow the gap in stages.
557       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
558       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
559       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
560       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
561 
562       // Expand all extending loads to types larger than this, and truncating
563       // stores from types larger than this.
564       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
565         setTruncStoreAction(OtherVT, VT, Expand);
566         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
567         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
568         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
569       }
570     }
571 
572     for (MVT VT : IntVecVTs) {
573       if (VT.getVectorElementType() == MVT::i64 &&
574           !Subtarget.hasVInstructionsI64())
575         continue;
576 
577       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
578       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
579 
580       // Vectors implement MULHS/MULHU.
581       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
582       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
583 
584       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
585       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
586         setOperationAction(ISD::MULHU, VT, Expand);
587         setOperationAction(ISD::MULHS, VT, Expand);
588       }
589 
590       setOperationAction(ISD::SMIN, VT, Legal);
591       setOperationAction(ISD::SMAX, VT, Legal);
592       setOperationAction(ISD::UMIN, VT, Legal);
593       setOperationAction(ISD::UMAX, VT, Legal);
594 
595       setOperationAction(ISD::ROTL, VT, Expand);
596       setOperationAction(ISD::ROTR, VT, Expand);
597 
598       setOperationAction(ISD::CTTZ, VT, Expand);
599       setOperationAction(ISD::CTLZ, VT, Expand);
600       setOperationAction(ISD::CTPOP, VT, Expand);
601 
602       setOperationAction(ISD::BSWAP, VT, Expand);
603 
604       // Custom-lower extensions and truncations from/to mask types.
605       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
606       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
607       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
608 
609       // RVV has native int->float & float->int conversions where the
610       // element type sizes are within one power-of-two of each other. Any
611       // wider distances between type sizes have to be lowered as sequences
612       // which progressively narrow the gap in stages.
613       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
614       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
615       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
616       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
617 
618       setOperationAction(ISD::SADDSAT, VT, Legal);
619       setOperationAction(ISD::UADDSAT, VT, Legal);
620       setOperationAction(ISD::SSUBSAT, VT, Legal);
621       setOperationAction(ISD::USUBSAT, VT, Legal);
622 
623       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
624       // nodes which truncate by one power of two at a time.
625       setOperationAction(ISD::TRUNCATE, VT, Custom);
626 
627       // Custom-lower insert/extract operations to simplify patterns.
628       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
629       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
630 
631       // Custom-lower reduction operations to set up the corresponding custom
632       // nodes' operands.
633       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
634       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
635       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
636       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
637       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
638       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
639       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
640       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
641 
642       for (unsigned VPOpc : IntegerVPOps)
643         setOperationAction(VPOpc, VT, Custom);
644 
645       setOperationAction(ISD::LOAD, VT, Custom);
646       setOperationAction(ISD::STORE, VT, Custom);
647 
648       setOperationAction(ISD::MLOAD, VT, Custom);
649       setOperationAction(ISD::MSTORE, VT, Custom);
650       setOperationAction(ISD::MGATHER, VT, Custom);
651       setOperationAction(ISD::MSCATTER, VT, Custom);
652 
653       setOperationAction(ISD::VP_LOAD, VT, Custom);
654       setOperationAction(ISD::VP_STORE, VT, Custom);
655       setOperationAction(ISD::VP_GATHER, VT, Custom);
656       setOperationAction(ISD::VP_SCATTER, VT, Custom);
657 
658       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
659       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
660       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
661 
662       setOperationAction(ISD::SELECT, VT, Custom);
663       setOperationAction(ISD::SELECT_CC, VT, Expand);
664 
665       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
666       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
667 
668       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
669         setTruncStoreAction(VT, OtherVT, Expand);
670         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
671         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
672         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
673       }
674 
675       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
676       // type that can represent the value exactly.
677       if (VT.getVectorElementType() != MVT::i64) {
678         MVT FloatEltVT =
679             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
680         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
681         if (isTypeLegal(FloatVT)) {
682           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
683           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
684         }
685       }
686     }
687 
688     // Expand various CCs to best match the RVV ISA, which natively supports UNE
689     // but no other unordered comparisons, and supports all ordered comparisons
690     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
691     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
692     // and we pattern-match those back to the "original", swapping operands once
693     // more. This way we catch both operations and both "vf" and "fv" forms with
694     // fewer patterns.
695     static const ISD::CondCode VFPCCToExpand[] = {
696         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
697         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
698         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
699     };
700 
701     // Sets common operation actions on RVV floating-point vector types.
702     const auto SetCommonVFPActions = [&](MVT VT) {
703       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
704       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
705       // sizes are within one power-of-two of each other. Therefore conversions
706       // between vXf16 and vXf64 must be lowered as sequences which convert via
707       // vXf32.
708       setOperationAction(ISD::FP_ROUND, VT, Custom);
709       setOperationAction(ISD::FP_EXTEND, VT, Custom);
710       // Custom-lower insert/extract operations to simplify patterns.
711       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
712       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
713       // Expand various condition codes (explained above).
714       for (auto CC : VFPCCToExpand)
715         setCondCodeAction(CC, VT, Expand);
716 
717       setOperationAction(ISD::FMINNUM, VT, Legal);
718       setOperationAction(ISD::FMAXNUM, VT, Legal);
719 
720       setOperationAction(ISD::FTRUNC, VT, Custom);
721       setOperationAction(ISD::FCEIL, VT, Custom);
722       setOperationAction(ISD::FFLOOR, VT, Custom);
723       setOperationAction(ISD::FROUND, VT, Custom);
724 
725       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
726       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
727       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
728       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
729 
730       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
731 
732       setOperationAction(ISD::LOAD, VT, Custom);
733       setOperationAction(ISD::STORE, VT, Custom);
734 
735       setOperationAction(ISD::MLOAD, VT, Custom);
736       setOperationAction(ISD::MSTORE, VT, Custom);
737       setOperationAction(ISD::MGATHER, VT, Custom);
738       setOperationAction(ISD::MSCATTER, VT, Custom);
739 
740       setOperationAction(ISD::VP_LOAD, VT, Custom);
741       setOperationAction(ISD::VP_STORE, VT, Custom);
742       setOperationAction(ISD::VP_GATHER, VT, Custom);
743       setOperationAction(ISD::VP_SCATTER, VT, Custom);
744 
745       setOperationAction(ISD::SELECT, VT, Custom);
746       setOperationAction(ISD::SELECT_CC, VT, Expand);
747 
748       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
749       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
750       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
751 
752       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
753 
754       for (unsigned VPOpc : FloatingPointVPOps)
755         setOperationAction(VPOpc, VT, Custom);
756     };
757 
758     // Sets common extload/truncstore actions on RVV floating-point vector
759     // types.
760     const auto SetCommonVFPExtLoadTruncStoreActions =
761         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
762           for (auto SmallVT : SmallerVTs) {
763             setTruncStoreAction(VT, SmallVT, Expand);
764             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
765           }
766         };
767 
768     if (Subtarget.hasVInstructionsF16())
769       for (MVT VT : F16VecVTs)
770         SetCommonVFPActions(VT);
771 
772     for (MVT VT : F32VecVTs) {
773       if (Subtarget.hasVInstructionsF32())
774         SetCommonVFPActions(VT);
775       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
776     }
777 
778     for (MVT VT : F64VecVTs) {
779       if (Subtarget.hasVInstructionsF64())
780         SetCommonVFPActions(VT);
781       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
782       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
783     }
784 
785     if (Subtarget.useRVVForFixedLengthVectors()) {
786       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
787         if (!useRVVForFixedLengthVectorVT(VT))
788           continue;
789 
790         // By default everything must be expanded.
791         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
792           setOperationAction(Op, VT, Expand);
793         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
794           setTruncStoreAction(VT, OtherVT, Expand);
795           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
796           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
797           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
798         }
799 
800         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
801         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
802         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
803 
804         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
805         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
806 
807         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
808         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
809 
810         setOperationAction(ISD::LOAD, VT, Custom);
811         setOperationAction(ISD::STORE, VT, Custom);
812 
813         setOperationAction(ISD::SETCC, VT, Custom);
814 
815         setOperationAction(ISD::SELECT, VT, Custom);
816 
817         setOperationAction(ISD::TRUNCATE, VT, Custom);
818 
819         setOperationAction(ISD::BITCAST, VT, Custom);
820 
821         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
822         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
823         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
824 
825         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
826         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
827         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
828 
829         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
830         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
831         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
832         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
833 
834         // Operations below are different for between masks and other vectors.
835         if (VT.getVectorElementType() == MVT::i1) {
836           setOperationAction(ISD::VP_AND, VT, Custom);
837           setOperationAction(ISD::VP_OR, VT, Custom);
838           setOperationAction(ISD::VP_XOR, VT, Custom);
839           setOperationAction(ISD::AND, VT, Custom);
840           setOperationAction(ISD::OR, VT, Custom);
841           setOperationAction(ISD::XOR, VT, Custom);
842           continue;
843         }
844 
845         // Use SPLAT_VECTOR to prevent type legalization from destroying the
846         // splats when type legalizing i64 scalar on RV32.
847         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
848         // improvements first.
849         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
850           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
851           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
852         }
853 
854         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
855         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
856 
857         setOperationAction(ISD::MLOAD, VT, Custom);
858         setOperationAction(ISD::MSTORE, VT, Custom);
859         setOperationAction(ISD::MGATHER, VT, Custom);
860         setOperationAction(ISD::MSCATTER, VT, Custom);
861 
862         setOperationAction(ISD::VP_LOAD, VT, Custom);
863         setOperationAction(ISD::VP_STORE, VT, Custom);
864         setOperationAction(ISD::VP_GATHER, VT, Custom);
865         setOperationAction(ISD::VP_SCATTER, VT, Custom);
866 
867         setOperationAction(ISD::ADD, VT, Custom);
868         setOperationAction(ISD::MUL, VT, Custom);
869         setOperationAction(ISD::SUB, VT, Custom);
870         setOperationAction(ISD::AND, VT, Custom);
871         setOperationAction(ISD::OR, VT, Custom);
872         setOperationAction(ISD::XOR, VT, Custom);
873         setOperationAction(ISD::SDIV, VT, Custom);
874         setOperationAction(ISD::SREM, VT, Custom);
875         setOperationAction(ISD::UDIV, VT, Custom);
876         setOperationAction(ISD::UREM, VT, Custom);
877         setOperationAction(ISD::SHL, VT, Custom);
878         setOperationAction(ISD::SRA, VT, Custom);
879         setOperationAction(ISD::SRL, VT, Custom);
880 
881         setOperationAction(ISD::SMIN, VT, Custom);
882         setOperationAction(ISD::SMAX, VT, Custom);
883         setOperationAction(ISD::UMIN, VT, Custom);
884         setOperationAction(ISD::UMAX, VT, Custom);
885         setOperationAction(ISD::ABS,  VT, Custom);
886 
887         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
888         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
889           setOperationAction(ISD::MULHS, VT, Custom);
890           setOperationAction(ISD::MULHU, VT, Custom);
891         }
892 
893         setOperationAction(ISD::SADDSAT, VT, Custom);
894         setOperationAction(ISD::UADDSAT, VT, Custom);
895         setOperationAction(ISD::SSUBSAT, VT, Custom);
896         setOperationAction(ISD::USUBSAT, VT, Custom);
897 
898         setOperationAction(ISD::VSELECT, VT, Custom);
899         setOperationAction(ISD::SELECT_CC, VT, Expand);
900 
901         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
902         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
903         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
904 
905         // Custom-lower reduction operations to set up the corresponding custom
906         // nodes' operands.
907         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
908         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
909         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
910         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
911         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
912 
913         for (unsigned VPOpc : IntegerVPOps)
914           setOperationAction(VPOpc, VT, Custom);
915 
916         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
917         // type that can represent the value exactly.
918         if (VT.getVectorElementType() != MVT::i64) {
919           MVT FloatEltVT =
920               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
921           EVT FloatVT =
922               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
923           if (isTypeLegal(FloatVT)) {
924             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
925             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
926           }
927         }
928       }
929 
930       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
931         if (!useRVVForFixedLengthVectorVT(VT))
932           continue;
933 
934         // By default everything must be expanded.
935         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
936           setOperationAction(Op, VT, Expand);
937         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
938           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
939           setTruncStoreAction(VT, OtherVT, Expand);
940         }
941 
942         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
943         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
944         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
945 
946         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
947         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
948         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
949         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
950         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
951 
952         setOperationAction(ISD::LOAD, VT, Custom);
953         setOperationAction(ISD::STORE, VT, Custom);
954         setOperationAction(ISD::MLOAD, VT, Custom);
955         setOperationAction(ISD::MSTORE, VT, Custom);
956         setOperationAction(ISD::MGATHER, VT, Custom);
957         setOperationAction(ISD::MSCATTER, VT, Custom);
958 
959         setOperationAction(ISD::VP_LOAD, VT, Custom);
960         setOperationAction(ISD::VP_STORE, VT, Custom);
961         setOperationAction(ISD::VP_GATHER, VT, Custom);
962         setOperationAction(ISD::VP_SCATTER, VT, Custom);
963 
964         setOperationAction(ISD::FADD, VT, Custom);
965         setOperationAction(ISD::FSUB, VT, Custom);
966         setOperationAction(ISD::FMUL, VT, Custom);
967         setOperationAction(ISD::FDIV, VT, Custom);
968         setOperationAction(ISD::FNEG, VT, Custom);
969         setOperationAction(ISD::FABS, VT, Custom);
970         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
971         setOperationAction(ISD::FSQRT, VT, Custom);
972         setOperationAction(ISD::FMA, VT, Custom);
973         setOperationAction(ISD::FMINNUM, VT, Custom);
974         setOperationAction(ISD::FMAXNUM, VT, Custom);
975 
976         setOperationAction(ISD::FP_ROUND, VT, Custom);
977         setOperationAction(ISD::FP_EXTEND, VT, Custom);
978 
979         setOperationAction(ISD::FTRUNC, VT, Custom);
980         setOperationAction(ISD::FCEIL, VT, Custom);
981         setOperationAction(ISD::FFLOOR, VT, Custom);
982         setOperationAction(ISD::FROUND, VT, Custom);
983 
984         for (auto CC : VFPCCToExpand)
985           setCondCodeAction(CC, VT, Expand);
986 
987         setOperationAction(ISD::VSELECT, VT, Custom);
988         setOperationAction(ISD::SELECT, VT, Custom);
989         setOperationAction(ISD::SELECT_CC, VT, Expand);
990 
991         setOperationAction(ISD::BITCAST, VT, Custom);
992 
993         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
994         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
995         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
996         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
997 
998         for (unsigned VPOpc : FloatingPointVPOps)
999           setOperationAction(VPOpc, VT, Custom);
1000       }
1001 
1002       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1003       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1004       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1005       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1006       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1007       if (Subtarget.hasStdExtZfh())
1008         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1009       if (Subtarget.hasStdExtF())
1010         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1011       if (Subtarget.hasStdExtD())
1012         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1013     }
1014   }
1015 
1016   // Function alignments.
1017   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1018   setMinFunctionAlignment(FunctionAlignment);
1019   setPrefFunctionAlignment(FunctionAlignment);
1020 
1021   setMinimumJumpTableEntries(5);
1022 
1023   // Jumps are expensive, compared to logic
1024   setJumpIsExpensive();
1025 
1026   setTargetDAGCombine(ISD::ADD);
1027   setTargetDAGCombine(ISD::SUB);
1028   setTargetDAGCombine(ISD::AND);
1029   setTargetDAGCombine(ISD::OR);
1030   setTargetDAGCombine(ISD::XOR);
1031   setTargetDAGCombine(ISD::ROTL);
1032   setTargetDAGCombine(ISD::ROTR);
1033   setTargetDAGCombine(ISD::ANY_EXTEND);
1034   if (Subtarget.hasStdExtF()) {
1035     setTargetDAGCombine(ISD::ZERO_EXTEND);
1036     setTargetDAGCombine(ISD::FP_TO_SINT);
1037     setTargetDAGCombine(ISD::FP_TO_UINT);
1038     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1039     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1040   }
1041   if (Subtarget.hasVInstructions()) {
1042     setTargetDAGCombine(ISD::FCOPYSIGN);
1043     setTargetDAGCombine(ISD::MGATHER);
1044     setTargetDAGCombine(ISD::MSCATTER);
1045     setTargetDAGCombine(ISD::VP_GATHER);
1046     setTargetDAGCombine(ISD::VP_SCATTER);
1047     setTargetDAGCombine(ISD::SRA);
1048     setTargetDAGCombine(ISD::SRL);
1049     setTargetDAGCombine(ISD::SHL);
1050     setTargetDAGCombine(ISD::STORE);
1051     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1052   }
1053 
1054   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1055   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1056 }
1057 
1058 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1059                                             LLVMContext &Context,
1060                                             EVT VT) const {
1061   if (!VT.isVector())
1062     return getPointerTy(DL);
1063   if (Subtarget.hasVInstructions() &&
1064       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1065     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1066   return VT.changeVectorElementTypeToInteger();
1067 }
1068 
1069 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1070   return Subtarget.getXLenVT();
1071 }
1072 
1073 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1074                                              const CallInst &I,
1075                                              MachineFunction &MF,
1076                                              unsigned Intrinsic) const {
1077   auto &DL = I.getModule()->getDataLayout();
1078   switch (Intrinsic) {
1079   default:
1080     return false;
1081   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1082   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1083   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1084   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1085   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1086   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1087   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1088   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1089   case Intrinsic::riscv_masked_cmpxchg_i32:
1090     Info.opc = ISD::INTRINSIC_W_CHAIN;
1091     Info.memVT = MVT::i32;
1092     Info.ptrVal = I.getArgOperand(0);
1093     Info.offset = 0;
1094     Info.align = Align(4);
1095     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1096                  MachineMemOperand::MOVolatile;
1097     return true;
1098   case Intrinsic::riscv_masked_strided_load:
1099     Info.opc = ISD::INTRINSIC_W_CHAIN;
1100     Info.ptrVal = I.getArgOperand(1);
1101     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1102     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1103     Info.size = MemoryLocation::UnknownSize;
1104     Info.flags |= MachineMemOperand::MOLoad;
1105     return true;
1106   case Intrinsic::riscv_masked_strided_store:
1107     Info.opc = ISD::INTRINSIC_VOID;
1108     Info.ptrVal = I.getArgOperand(1);
1109     Info.memVT =
1110         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1111     Info.align = Align(
1112         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1113         8);
1114     Info.size = MemoryLocation::UnknownSize;
1115     Info.flags |= MachineMemOperand::MOStore;
1116     return true;
1117   }
1118 }
1119 
1120 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1121                                                 const AddrMode &AM, Type *Ty,
1122                                                 unsigned AS,
1123                                                 Instruction *I) const {
1124   // No global is ever allowed as a base.
1125   if (AM.BaseGV)
1126     return false;
1127 
1128   // Require a 12-bit signed offset.
1129   if (!isInt<12>(AM.BaseOffs))
1130     return false;
1131 
1132   switch (AM.Scale) {
1133   case 0: // "r+i" or just "i", depending on HasBaseReg.
1134     break;
1135   case 1:
1136     if (!AM.HasBaseReg) // allow "r+i".
1137       break;
1138     return false; // disallow "r+r" or "r+r+i".
1139   default:
1140     return false;
1141   }
1142 
1143   return true;
1144 }
1145 
1146 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1147   return isInt<12>(Imm);
1148 }
1149 
1150 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1151   return isInt<12>(Imm);
1152 }
1153 
1154 // On RV32, 64-bit integers are split into their high and low parts and held
1155 // in two different registers, so the trunc is free since the low register can
1156 // just be used.
1157 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1158   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1159     return false;
1160   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1161   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1162   return (SrcBits == 64 && DestBits == 32);
1163 }
1164 
1165 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1166   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1167       !SrcVT.isInteger() || !DstVT.isInteger())
1168     return false;
1169   unsigned SrcBits = SrcVT.getSizeInBits();
1170   unsigned DestBits = DstVT.getSizeInBits();
1171   return (SrcBits == 64 && DestBits == 32);
1172 }
1173 
1174 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1175   // Zexts are free if they can be combined with a load.
1176   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1177   // poorly with type legalization of compares preferring sext.
1178   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1179     EVT MemVT = LD->getMemoryVT();
1180     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1181         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1182          LD->getExtensionType() == ISD::ZEXTLOAD))
1183       return true;
1184   }
1185 
1186   return TargetLowering::isZExtFree(Val, VT2);
1187 }
1188 
1189 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1190   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1191 }
1192 
1193 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1194   return Subtarget.hasStdExtZbb();
1195 }
1196 
1197 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1198   return Subtarget.hasStdExtZbb();
1199 }
1200 
1201 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1202   EVT VT = Y.getValueType();
1203 
1204   // FIXME: Support vectors once we have tests.
1205   if (VT.isVector())
1206     return false;
1207 
1208   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1209           Subtarget.hasStdExtZbkb()) &&
1210          !isa<ConstantSDNode>(Y);
1211 }
1212 
1213 /// Check if sinking \p I's operands to I's basic block is profitable, because
1214 /// the operands can be folded into a target instruction, e.g.
1215 /// splats of scalars can fold into vector instructions.
1216 bool RISCVTargetLowering::shouldSinkOperands(
1217     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1218   using namespace llvm::PatternMatch;
1219 
1220   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1221     return false;
1222 
1223   auto IsSinker = [&](Instruction *I, int Operand) {
1224     switch (I->getOpcode()) {
1225     case Instruction::Add:
1226     case Instruction::Sub:
1227     case Instruction::Mul:
1228     case Instruction::And:
1229     case Instruction::Or:
1230     case Instruction::Xor:
1231     case Instruction::FAdd:
1232     case Instruction::FSub:
1233     case Instruction::FMul:
1234     case Instruction::FDiv:
1235     case Instruction::ICmp:
1236     case Instruction::FCmp:
1237       return true;
1238     case Instruction::Shl:
1239     case Instruction::LShr:
1240     case Instruction::AShr:
1241     case Instruction::UDiv:
1242     case Instruction::SDiv:
1243     case Instruction::URem:
1244     case Instruction::SRem:
1245       return Operand == 1;
1246     case Instruction::Call:
1247       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1248         switch (II->getIntrinsicID()) {
1249         case Intrinsic::fma:
1250         case Intrinsic::vp_fma:
1251           return Operand == 0 || Operand == 1;
1252         // FIXME: Our patterns can only match vx/vf instructions when the splat
1253         // it on the RHS, because TableGen doesn't recognize our VP operations
1254         // as commutative.
1255         case Intrinsic::vp_add:
1256         case Intrinsic::vp_mul:
1257         case Intrinsic::vp_and:
1258         case Intrinsic::vp_or:
1259         case Intrinsic::vp_xor:
1260         case Intrinsic::vp_fadd:
1261         case Intrinsic::vp_fmul:
1262         case Intrinsic::vp_shl:
1263         case Intrinsic::vp_lshr:
1264         case Intrinsic::vp_ashr:
1265         case Intrinsic::vp_udiv:
1266         case Intrinsic::vp_sdiv:
1267         case Intrinsic::vp_urem:
1268         case Intrinsic::vp_srem:
1269           return Operand == 1;
1270         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1271         // explicit patterns for both LHS and RHS (as 'vr' versions).
1272         case Intrinsic::vp_sub:
1273         case Intrinsic::vp_fsub:
1274         case Intrinsic::vp_fdiv:
1275           return Operand == 0 || Operand == 1;
1276         default:
1277           return false;
1278         }
1279       }
1280       return false;
1281     default:
1282       return false;
1283     }
1284   };
1285 
1286   for (auto OpIdx : enumerate(I->operands())) {
1287     if (!IsSinker(I, OpIdx.index()))
1288       continue;
1289 
1290     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1291     // Make sure we are not already sinking this operand
1292     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1293       continue;
1294 
1295     // We are looking for a splat that can be sunk.
1296     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1297                              m_Undef(), m_ZeroMask())))
1298       continue;
1299 
1300     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1301     // and vector registers
1302     for (Use &U : Op->uses()) {
1303       Instruction *Insn = cast<Instruction>(U.getUser());
1304       if (!IsSinker(Insn, U.getOperandNo()))
1305         return false;
1306     }
1307 
1308     Ops.push_back(&Op->getOperandUse(0));
1309     Ops.push_back(&OpIdx.value());
1310   }
1311   return true;
1312 }
1313 
1314 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1315                                        bool ForCodeSize) const {
1316   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1317   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1318     return false;
1319   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1320     return false;
1321   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1322     return false;
1323   return Imm.isZero();
1324 }
1325 
1326 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1327   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1328          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1329          (VT == MVT::f64 && Subtarget.hasStdExtD());
1330 }
1331 
1332 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1333                                                       CallingConv::ID CC,
1334                                                       EVT VT) const {
1335   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1336   // We might still end up using a GPR but that will be decided based on ABI.
1337   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1338   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1339     return MVT::f32;
1340 
1341   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1342 }
1343 
1344 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1345                                                            CallingConv::ID CC,
1346                                                            EVT VT) const {
1347   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1348   // We might still end up using a GPR but that will be decided based on ABI.
1349   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1350   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1351     return 1;
1352 
1353   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1354 }
1355 
1356 // Changes the condition code and swaps operands if necessary, so the SetCC
1357 // operation matches one of the comparisons supported directly by branches
1358 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1359 // with 1/-1.
1360 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1361                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1362   // Convert X > -1 to X >= 0.
1363   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1364     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1365     CC = ISD::SETGE;
1366     return;
1367   }
1368   // Convert X < 1 to 0 >= X.
1369   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1370     RHS = LHS;
1371     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1372     CC = ISD::SETGE;
1373     return;
1374   }
1375 
1376   switch (CC) {
1377   default:
1378     break;
1379   case ISD::SETGT:
1380   case ISD::SETLE:
1381   case ISD::SETUGT:
1382   case ISD::SETULE:
1383     CC = ISD::getSetCCSwappedOperands(CC);
1384     std::swap(LHS, RHS);
1385     break;
1386   }
1387 }
1388 
1389 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1390   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1391   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1392   if (VT.getVectorElementType() == MVT::i1)
1393     KnownSize *= 8;
1394 
1395   switch (KnownSize) {
1396   default:
1397     llvm_unreachable("Invalid LMUL.");
1398   case 8:
1399     return RISCVII::VLMUL::LMUL_F8;
1400   case 16:
1401     return RISCVII::VLMUL::LMUL_F4;
1402   case 32:
1403     return RISCVII::VLMUL::LMUL_F2;
1404   case 64:
1405     return RISCVII::VLMUL::LMUL_1;
1406   case 128:
1407     return RISCVII::VLMUL::LMUL_2;
1408   case 256:
1409     return RISCVII::VLMUL::LMUL_4;
1410   case 512:
1411     return RISCVII::VLMUL::LMUL_8;
1412   }
1413 }
1414 
1415 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1416   switch (LMul) {
1417   default:
1418     llvm_unreachable("Invalid LMUL.");
1419   case RISCVII::VLMUL::LMUL_F8:
1420   case RISCVII::VLMUL::LMUL_F4:
1421   case RISCVII::VLMUL::LMUL_F2:
1422   case RISCVII::VLMUL::LMUL_1:
1423     return RISCV::VRRegClassID;
1424   case RISCVII::VLMUL::LMUL_2:
1425     return RISCV::VRM2RegClassID;
1426   case RISCVII::VLMUL::LMUL_4:
1427     return RISCV::VRM4RegClassID;
1428   case RISCVII::VLMUL::LMUL_8:
1429     return RISCV::VRM8RegClassID;
1430   }
1431 }
1432 
1433 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1434   RISCVII::VLMUL LMUL = getLMUL(VT);
1435   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1436       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1437       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1438       LMUL == RISCVII::VLMUL::LMUL_1) {
1439     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1440                   "Unexpected subreg numbering");
1441     return RISCV::sub_vrm1_0 + Index;
1442   }
1443   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1444     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1445                   "Unexpected subreg numbering");
1446     return RISCV::sub_vrm2_0 + Index;
1447   }
1448   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1449     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1450                   "Unexpected subreg numbering");
1451     return RISCV::sub_vrm4_0 + Index;
1452   }
1453   llvm_unreachable("Invalid vector type.");
1454 }
1455 
1456 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1457   if (VT.getVectorElementType() == MVT::i1)
1458     return RISCV::VRRegClassID;
1459   return getRegClassIDForLMUL(getLMUL(VT));
1460 }
1461 
1462 // Attempt to decompose a subvector insert/extract between VecVT and
1463 // SubVecVT via subregister indices. Returns the subregister index that
1464 // can perform the subvector insert/extract with the given element index, as
1465 // well as the index corresponding to any leftover subvectors that must be
1466 // further inserted/extracted within the register class for SubVecVT.
1467 std::pair<unsigned, unsigned>
1468 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1469     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1470     const RISCVRegisterInfo *TRI) {
1471   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1472                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1473                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1474                 "Register classes not ordered");
1475   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1476   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1477   // Try to compose a subregister index that takes us from the incoming
1478   // LMUL>1 register class down to the outgoing one. At each step we half
1479   // the LMUL:
1480   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1481   // Note that this is not guaranteed to find a subregister index, such as
1482   // when we are extracting from one VR type to another.
1483   unsigned SubRegIdx = RISCV::NoSubRegister;
1484   for (const unsigned RCID :
1485        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1486     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1487       VecVT = VecVT.getHalfNumVectorElementsVT();
1488       bool IsHi =
1489           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1490       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1491                                             getSubregIndexByMVT(VecVT, IsHi));
1492       if (IsHi)
1493         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1494     }
1495   return {SubRegIdx, InsertExtractIdx};
1496 }
1497 
1498 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1499 // stores for those types.
1500 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1501   return !Subtarget.useRVVForFixedLengthVectors() ||
1502          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1503 }
1504 
1505 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1506   if (ScalarTy->isPointerTy())
1507     return true;
1508 
1509   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1510       ScalarTy->isIntegerTy(32))
1511     return true;
1512 
1513   if (ScalarTy->isIntegerTy(64))
1514     return Subtarget.hasVInstructionsI64();
1515 
1516   if (ScalarTy->isHalfTy())
1517     return Subtarget.hasVInstructionsF16();
1518   if (ScalarTy->isFloatTy())
1519     return Subtarget.hasVInstructionsF32();
1520   if (ScalarTy->isDoubleTy())
1521     return Subtarget.hasVInstructionsF64();
1522 
1523   return false;
1524 }
1525 
1526 static SDValue getVLOperand(SDValue Op) {
1527   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1528           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1529          "Unexpected opcode");
1530   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1531   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1532   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1533       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1534   if (!II)
1535     return SDValue();
1536   return Op.getOperand(II->VLOperand + 1 + HasChain);
1537 }
1538 
1539 static bool useRVVForFixedLengthVectorVT(MVT VT,
1540                                          const RISCVSubtarget &Subtarget) {
1541   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1542   if (!Subtarget.useRVVForFixedLengthVectors())
1543     return false;
1544 
1545   // We only support a set of vector types with a consistent maximum fixed size
1546   // across all supported vector element types to avoid legalization issues.
1547   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1548   // fixed-length vector type we support is 1024 bytes.
1549   if (VT.getFixedSizeInBits() > 1024 * 8)
1550     return false;
1551 
1552   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1553 
1554   MVT EltVT = VT.getVectorElementType();
1555 
1556   // Don't use RVV for vectors we cannot scalarize if required.
1557   switch (EltVT.SimpleTy) {
1558   // i1 is supported but has different rules.
1559   default:
1560     return false;
1561   case MVT::i1:
1562     // Masks can only use a single register.
1563     if (VT.getVectorNumElements() > MinVLen)
1564       return false;
1565     MinVLen /= 8;
1566     break;
1567   case MVT::i8:
1568   case MVT::i16:
1569   case MVT::i32:
1570     break;
1571   case MVT::i64:
1572     if (!Subtarget.hasVInstructionsI64())
1573       return false;
1574     break;
1575   case MVT::f16:
1576     if (!Subtarget.hasVInstructionsF16())
1577       return false;
1578     break;
1579   case MVT::f32:
1580     if (!Subtarget.hasVInstructionsF32())
1581       return false;
1582     break;
1583   case MVT::f64:
1584     if (!Subtarget.hasVInstructionsF64())
1585       return false;
1586     break;
1587   }
1588 
1589   // Reject elements larger than ELEN.
1590   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1591     return false;
1592 
1593   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1594   // Don't use RVV for types that don't fit.
1595   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1596     return false;
1597 
1598   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1599   // the base fixed length RVV support in place.
1600   if (!VT.isPow2VectorType())
1601     return false;
1602 
1603   return true;
1604 }
1605 
1606 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1607   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1608 }
1609 
1610 // Return the largest legal scalable vector type that matches VT's element type.
1611 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1612                                             const RISCVSubtarget &Subtarget) {
1613   // This may be called before legal types are setup.
1614   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1615           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1616          "Expected legal fixed length vector!");
1617 
1618   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1619   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1620 
1621   MVT EltVT = VT.getVectorElementType();
1622   switch (EltVT.SimpleTy) {
1623   default:
1624     llvm_unreachable("unexpected element type for RVV container");
1625   case MVT::i1:
1626   case MVT::i8:
1627   case MVT::i16:
1628   case MVT::i32:
1629   case MVT::i64:
1630   case MVT::f16:
1631   case MVT::f32:
1632   case MVT::f64: {
1633     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1634     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1635     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1636     unsigned NumElts =
1637         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1638     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1639     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1640     return MVT::getScalableVectorVT(EltVT, NumElts);
1641   }
1642   }
1643 }
1644 
1645 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1646                                             const RISCVSubtarget &Subtarget) {
1647   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1648                                           Subtarget);
1649 }
1650 
1651 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1652   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1653 }
1654 
1655 // Grow V to consume an entire RVV register.
1656 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1657                                        const RISCVSubtarget &Subtarget) {
1658   assert(VT.isScalableVector() &&
1659          "Expected to convert into a scalable vector!");
1660   assert(V.getValueType().isFixedLengthVector() &&
1661          "Expected a fixed length vector operand!");
1662   SDLoc DL(V);
1663   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1664   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1665 }
1666 
1667 // Shrink V so it's just big enough to maintain a VT's worth of data.
1668 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1669                                          const RISCVSubtarget &Subtarget) {
1670   assert(VT.isFixedLengthVector() &&
1671          "Expected to convert into a fixed length vector!");
1672   assert(V.getValueType().isScalableVector() &&
1673          "Expected a scalable vector operand!");
1674   SDLoc DL(V);
1675   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1676   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1677 }
1678 
1679 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1680 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1681 // the vector type that it is contained in.
1682 static std::pair<SDValue, SDValue>
1683 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1684                 const RISCVSubtarget &Subtarget) {
1685   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1686   MVT XLenVT = Subtarget.getXLenVT();
1687   SDValue VL = VecVT.isFixedLengthVector()
1688                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1689                    : DAG.getRegister(RISCV::X0, XLenVT);
1690   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1691   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1692   return {Mask, VL};
1693 }
1694 
1695 // As above but assuming the given type is a scalable vector type.
1696 static std::pair<SDValue, SDValue>
1697 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1698                         const RISCVSubtarget &Subtarget) {
1699   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1700   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1701 }
1702 
1703 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1704 // of either is (currently) supported. This can get us into an infinite loop
1705 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1706 // as a ..., etc.
1707 // Until either (or both) of these can reliably lower any node, reporting that
1708 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1709 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1710 // which is not desirable.
1711 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1712     EVT VT, unsigned DefinedValues) const {
1713   return false;
1714 }
1715 
1716 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1717                                   const RISCVSubtarget &Subtarget) {
1718   // RISCV FP-to-int conversions saturate to the destination register size, but
1719   // don't produce 0 for nan. We can use a conversion instruction and fix the
1720   // nan case with a compare and a select.
1721   SDValue Src = Op.getOperand(0);
1722 
1723   EVT DstVT = Op.getValueType();
1724   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1725 
1726   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1727   unsigned Opc;
1728   if (SatVT == DstVT)
1729     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1730   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1731     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1732   else
1733     return SDValue();
1734   // FIXME: Support other SatVTs by clamping before or after the conversion.
1735 
1736   SDLoc DL(Op);
1737   SDValue FpToInt = DAG.getNode(
1738       Opc, DL, DstVT, Src,
1739       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1740 
1741   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1742   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1743 }
1744 
1745 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1746 // and back. Taking care to avoid converting values that are nan or already
1747 // correct.
1748 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1749 // have FRM dependencies modeled yet.
1750 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1751   MVT VT = Op.getSimpleValueType();
1752   assert(VT.isVector() && "Unexpected type");
1753 
1754   SDLoc DL(Op);
1755 
1756   // Freeze the source since we are increasing the number of uses.
1757   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1758 
1759   // Truncate to integer and convert back to FP.
1760   MVT IntVT = VT.changeVectorElementTypeToInteger();
1761   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1762   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1763 
1764   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1765 
1766   if (Op.getOpcode() == ISD::FCEIL) {
1767     // If the truncated value is the greater than or equal to the original
1768     // value, we've computed the ceil. Otherwise, we went the wrong way and
1769     // need to increase by 1.
1770     // FIXME: This should use a masked operation. Handle here or in isel?
1771     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1772                                  DAG.getConstantFP(1.0, DL, VT));
1773     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1774     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1775   } else if (Op.getOpcode() == ISD::FFLOOR) {
1776     // If the truncated value is the less than or equal to the original value,
1777     // we've computed the floor. Otherwise, we went the wrong way and need to
1778     // decrease by 1.
1779     // FIXME: This should use a masked operation. Handle here or in isel?
1780     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1781                                  DAG.getConstantFP(1.0, DL, VT));
1782     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1783     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1784   }
1785 
1786   // Restore the original sign so that -0.0 is preserved.
1787   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1788 
1789   // Determine the largest integer that can be represented exactly. This and
1790   // values larger than it don't have any fractional bits so don't need to
1791   // be converted.
1792   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1793   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1794   APFloat MaxVal = APFloat(FltSem);
1795   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1796                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1797   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1798 
1799   // If abs(Src) was larger than MaxVal or nan, keep it.
1800   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1801   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1802   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1803 }
1804 
1805 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1806 // This mode isn't supported in vector hardware on RISCV. But as long as we
1807 // aren't compiling with trapping math, we can emulate this with
1808 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1809 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1810 // dependencies modeled yet.
1811 // FIXME: Use masked operations to avoid final merge.
1812 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1813   MVT VT = Op.getSimpleValueType();
1814   assert(VT.isVector() && "Unexpected type");
1815 
1816   SDLoc DL(Op);
1817 
1818   // Freeze the source since we are increasing the number of uses.
1819   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1820 
1821   // We do the conversion on the absolute value and fix the sign at the end.
1822   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1823 
1824   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1825   bool Ignored;
1826   APFloat Point5Pred = APFloat(0.5f);
1827   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1828   Point5Pred.next(/*nextDown*/ true);
1829 
1830   // Add the adjustment.
1831   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1832                                DAG.getConstantFP(Point5Pred, DL, VT));
1833 
1834   // Truncate to integer and convert back to fp.
1835   MVT IntVT = VT.changeVectorElementTypeToInteger();
1836   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1837   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1838 
1839   // Restore the original sign.
1840   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1841 
1842   // Determine the largest integer that can be represented exactly. This and
1843   // values larger than it don't have any fractional bits so don't need to
1844   // be converted.
1845   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1846   APFloat MaxVal = APFloat(FltSem);
1847   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1848                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1849   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1850 
1851   // If abs(Src) was larger than MaxVal or nan, keep it.
1852   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1853   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1854   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1855 }
1856 
1857 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1858                                  const RISCVSubtarget &Subtarget) {
1859   MVT VT = Op.getSimpleValueType();
1860   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1861 
1862   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1863 
1864   SDLoc DL(Op);
1865   SDValue Mask, VL;
1866   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1867 
1868   unsigned Opc =
1869       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1870   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
1871                               Op.getOperand(0), VL);
1872   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1873 }
1874 
1875 struct VIDSequence {
1876   int64_t StepNumerator;
1877   unsigned StepDenominator;
1878   int64_t Addend;
1879 };
1880 
1881 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1882 // to the (non-zero) step S and start value X. This can be then lowered as the
1883 // RVV sequence (VID * S) + X, for example.
1884 // The step S is represented as an integer numerator divided by a positive
1885 // denominator. Note that the implementation currently only identifies
1886 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1887 // cannot detect 2/3, for example.
1888 // Note that this method will also match potentially unappealing index
1889 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1890 // determine whether this is worth generating code for.
1891 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1892   unsigned NumElts = Op.getNumOperands();
1893   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1894   if (!Op.getValueType().isInteger())
1895     return None;
1896 
1897   Optional<unsigned> SeqStepDenom;
1898   Optional<int64_t> SeqStepNum, SeqAddend;
1899   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1900   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1901   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1902     // Assume undef elements match the sequence; we just have to be careful
1903     // when interpolating across them.
1904     if (Op.getOperand(Idx).isUndef())
1905       continue;
1906     // The BUILD_VECTOR must be all constants.
1907     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1908       return None;
1909 
1910     uint64_t Val = Op.getConstantOperandVal(Idx) &
1911                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1912 
1913     if (PrevElt) {
1914       // Calculate the step since the last non-undef element, and ensure
1915       // it's consistent across the entire sequence.
1916       unsigned IdxDiff = Idx - PrevElt->second;
1917       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1918 
1919       // A zero-value value difference means that we're somewhere in the middle
1920       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1921       // step change before evaluating the sequence.
1922       if (ValDiff != 0) {
1923         int64_t Remainder = ValDiff % IdxDiff;
1924         // Normalize the step if it's greater than 1.
1925         if (Remainder != ValDiff) {
1926           // The difference must cleanly divide the element span.
1927           if (Remainder != 0)
1928             return None;
1929           ValDiff /= IdxDiff;
1930           IdxDiff = 1;
1931         }
1932 
1933         if (!SeqStepNum)
1934           SeqStepNum = ValDiff;
1935         else if (ValDiff != SeqStepNum)
1936           return None;
1937 
1938         if (!SeqStepDenom)
1939           SeqStepDenom = IdxDiff;
1940         else if (IdxDiff != *SeqStepDenom)
1941           return None;
1942       }
1943     }
1944 
1945     // Record and/or check any addend.
1946     if (SeqStepNum && SeqStepDenom) {
1947       uint64_t ExpectedVal =
1948           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1949       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1950       if (!SeqAddend)
1951         SeqAddend = Addend;
1952       else if (SeqAddend != Addend)
1953         return None;
1954     }
1955 
1956     // Record this non-undef element for later.
1957     if (!PrevElt || PrevElt->first != Val)
1958       PrevElt = std::make_pair(Val, Idx);
1959   }
1960   // We need to have logged both a step and an addend for this to count as
1961   // a legal index sequence.
1962   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1963     return None;
1964 
1965   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1966 }
1967 
1968 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1969 // and lower it as a VRGATHER_VX_VL from the source vector.
1970 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1971                                   SelectionDAG &DAG,
1972                                   const RISCVSubtarget &Subtarget) {
1973   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1974     return SDValue();
1975   SDValue Vec = SplatVal.getOperand(0);
1976   // Only perform this optimization on vectors of the same size for simplicity.
1977   if (Vec.getValueType() != VT)
1978     return SDValue();
1979   SDValue Idx = SplatVal.getOperand(1);
1980   // The index must be a legal type.
1981   if (Idx.getValueType() != Subtarget.getXLenVT())
1982     return SDValue();
1983 
1984   MVT ContainerVT = VT;
1985   if (VT.isFixedLengthVector()) {
1986     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1987     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
1988   }
1989 
1990   SDValue Mask, VL;
1991   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1992 
1993   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
1994                                Idx, Mask, VL);
1995 
1996   if (!VT.isFixedLengthVector())
1997     return Gather;
1998 
1999   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2000 }
2001 
2002 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2003                                  const RISCVSubtarget &Subtarget) {
2004   MVT VT = Op.getSimpleValueType();
2005   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2006 
2007   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2008 
2009   SDLoc DL(Op);
2010   SDValue Mask, VL;
2011   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2012 
2013   MVT XLenVT = Subtarget.getXLenVT();
2014   unsigned NumElts = Op.getNumOperands();
2015 
2016   if (VT.getVectorElementType() == MVT::i1) {
2017     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2018       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2019       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2020     }
2021 
2022     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2023       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2024       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2025     }
2026 
2027     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2028     // scalar integer chunks whose bit-width depends on the number of mask
2029     // bits and XLEN.
2030     // First, determine the most appropriate scalar integer type to use. This
2031     // is at most XLenVT, but may be shrunk to a smaller vector element type
2032     // according to the size of the final vector - use i8 chunks rather than
2033     // XLenVT if we're producing a v8i1. This results in more consistent
2034     // codegen across RV32 and RV64.
2035     unsigned NumViaIntegerBits =
2036         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2037     NumViaIntegerBits = std::min(NumViaIntegerBits,
2038                                  Subtarget.getMaxELENForFixedLengthVectors());
2039     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2040       // If we have to use more than one INSERT_VECTOR_ELT then this
2041       // optimization is likely to increase code size; avoid peforming it in
2042       // such a case. We can use a load from a constant pool in this case.
2043       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2044         return SDValue();
2045       // Now we can create our integer vector type. Note that it may be larger
2046       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2047       MVT IntegerViaVecVT =
2048           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2049                            divideCeil(NumElts, NumViaIntegerBits));
2050 
2051       uint64_t Bits = 0;
2052       unsigned BitPos = 0, IntegerEltIdx = 0;
2053       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2054 
2055       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2056         // Once we accumulate enough bits to fill our scalar type, insert into
2057         // our vector and clear our accumulated data.
2058         if (I != 0 && I % NumViaIntegerBits == 0) {
2059           if (NumViaIntegerBits <= 32)
2060             Bits = SignExtend64(Bits, 32);
2061           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2062           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2063                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2064           Bits = 0;
2065           BitPos = 0;
2066           IntegerEltIdx++;
2067         }
2068         SDValue V = Op.getOperand(I);
2069         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2070         Bits |= ((uint64_t)BitValue << BitPos);
2071       }
2072 
2073       // Insert the (remaining) scalar value into position in our integer
2074       // vector type.
2075       if (NumViaIntegerBits <= 32)
2076         Bits = SignExtend64(Bits, 32);
2077       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2078       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2079                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2080 
2081       if (NumElts < NumViaIntegerBits) {
2082         // If we're producing a smaller vector than our minimum legal integer
2083         // type, bitcast to the equivalent (known-legal) mask type, and extract
2084         // our final mask.
2085         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2086         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2087         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2088                           DAG.getConstant(0, DL, XLenVT));
2089       } else {
2090         // Else we must have produced an integer type with the same size as the
2091         // mask type; bitcast for the final result.
2092         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2093         Vec = DAG.getBitcast(VT, Vec);
2094       }
2095 
2096       return Vec;
2097     }
2098 
2099     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2100     // vector type, we have a legal equivalently-sized i8 type, so we can use
2101     // that.
2102     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2103     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2104 
2105     SDValue WideVec;
2106     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2107       // For a splat, perform a scalar truncate before creating the wider
2108       // vector.
2109       assert(Splat.getValueType() == XLenVT &&
2110              "Unexpected type for i1 splat value");
2111       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2112                           DAG.getConstant(1, DL, XLenVT));
2113       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2114     } else {
2115       SmallVector<SDValue, 8> Ops(Op->op_values());
2116       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2117       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2118       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2119     }
2120 
2121     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2122   }
2123 
2124   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2125     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2126       return Gather;
2127     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2128                                         : RISCVISD::VMV_V_X_VL;
2129     Splat =
2130         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2131     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2132   }
2133 
2134   // Try and match index sequences, which we can lower to the vid instruction
2135   // with optional modifications. An all-undef vector is matched by
2136   // getSplatValue, above.
2137   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2138     int64_t StepNumerator = SimpleVID->StepNumerator;
2139     unsigned StepDenominator = SimpleVID->StepDenominator;
2140     int64_t Addend = SimpleVID->Addend;
2141 
2142     assert(StepNumerator != 0 && "Invalid step");
2143     bool Negate = false;
2144     int64_t SplatStepVal = StepNumerator;
2145     unsigned StepOpcode = ISD::MUL;
2146     if (StepNumerator != 1) {
2147       if (isPowerOf2_64(std::abs(StepNumerator))) {
2148         Negate = StepNumerator < 0;
2149         StepOpcode = ISD::SHL;
2150         SplatStepVal = Log2_64(std::abs(StepNumerator));
2151       }
2152     }
2153 
2154     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2155     // threshold since it's the immediate value many RVV instructions accept.
2156     // There is no vmul.vi instruction so ensure multiply constant can fit in
2157     // a single addi instruction.
2158     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2159          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2160         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2161       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2162       // Convert right out of the scalable type so we can use standard ISD
2163       // nodes for the rest of the computation. If we used scalable types with
2164       // these, we'd lose the fixed-length vector info and generate worse
2165       // vsetvli code.
2166       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2167       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2168           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2169         SDValue SplatStep = DAG.getSplatVector(
2170             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2171         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2172       }
2173       if (StepDenominator != 1) {
2174         SDValue SplatStep = DAG.getSplatVector(
2175             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2176         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2177       }
2178       if (Addend != 0 || Negate) {
2179         SDValue SplatAddend =
2180             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2181         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2182       }
2183       return VID;
2184     }
2185   }
2186 
2187   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2188   // when re-interpreted as a vector with a larger element type. For example,
2189   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2190   // could be instead splat as
2191   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2192   // TODO: This optimization could also work on non-constant splats, but it
2193   // would require bit-manipulation instructions to construct the splat value.
2194   SmallVector<SDValue> Sequence;
2195   unsigned EltBitSize = VT.getScalarSizeInBits();
2196   const auto *BV = cast<BuildVectorSDNode>(Op);
2197   if (VT.isInteger() && EltBitSize < 64 &&
2198       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2199       BV->getRepeatedSequence(Sequence) &&
2200       (Sequence.size() * EltBitSize) <= 64) {
2201     unsigned SeqLen = Sequence.size();
2202     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2203     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2204     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2205             ViaIntVT == MVT::i64) &&
2206            "Unexpected sequence type");
2207 
2208     unsigned EltIdx = 0;
2209     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2210     uint64_t SplatValue = 0;
2211     // Construct the amalgamated value which can be splatted as this larger
2212     // vector type.
2213     for (const auto &SeqV : Sequence) {
2214       if (!SeqV.isUndef())
2215         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2216                        << (EltIdx * EltBitSize));
2217       EltIdx++;
2218     }
2219 
2220     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2221     // achieve better constant materializion.
2222     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2223       SplatValue = SignExtend64(SplatValue, 32);
2224 
2225     // Since we can't introduce illegal i64 types at this stage, we can only
2226     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2227     // way we can use RVV instructions to splat.
2228     assert((ViaIntVT.bitsLE(XLenVT) ||
2229             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2230            "Unexpected bitcast sequence");
2231     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2232       SDValue ViaVL =
2233           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2234       MVT ViaContainerVT =
2235           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2236       SDValue Splat =
2237           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2238                       DAG.getUNDEF(ViaContainerVT),
2239                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2240       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2241       return DAG.getBitcast(VT, Splat);
2242     }
2243   }
2244 
2245   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2246   // which constitute a large proportion of the elements. In such cases we can
2247   // splat a vector with the dominant element and make up the shortfall with
2248   // INSERT_VECTOR_ELTs.
2249   // Note that this includes vectors of 2 elements by association. The
2250   // upper-most element is the "dominant" one, allowing us to use a splat to
2251   // "insert" the upper element, and an insert of the lower element at position
2252   // 0, which improves codegen.
2253   SDValue DominantValue;
2254   unsigned MostCommonCount = 0;
2255   DenseMap<SDValue, unsigned> ValueCounts;
2256   unsigned NumUndefElts =
2257       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2258 
2259   // Track the number of scalar loads we know we'd be inserting, estimated as
2260   // any non-zero floating-point constant. Other kinds of element are either
2261   // already in registers or are materialized on demand. The threshold at which
2262   // a vector load is more desirable than several scalar materializion and
2263   // vector-insertion instructions is not known.
2264   unsigned NumScalarLoads = 0;
2265 
2266   for (SDValue V : Op->op_values()) {
2267     if (V.isUndef())
2268       continue;
2269 
2270     ValueCounts.insert(std::make_pair(V, 0));
2271     unsigned &Count = ValueCounts[V];
2272 
2273     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2274       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2275 
2276     // Is this value dominant? In case of a tie, prefer the highest element as
2277     // it's cheaper to insert near the beginning of a vector than it is at the
2278     // end.
2279     if (++Count >= MostCommonCount) {
2280       DominantValue = V;
2281       MostCommonCount = Count;
2282     }
2283   }
2284 
2285   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2286   unsigned NumDefElts = NumElts - NumUndefElts;
2287   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2288 
2289   // Don't perform this optimization when optimizing for size, since
2290   // materializing elements and inserting them tends to cause code bloat.
2291   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2292       ((MostCommonCount > DominantValueCountThreshold) ||
2293        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2294     // Start by splatting the most common element.
2295     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2296 
2297     DenseSet<SDValue> Processed{DominantValue};
2298     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2299     for (const auto &OpIdx : enumerate(Op->ops())) {
2300       const SDValue &V = OpIdx.value();
2301       if (V.isUndef() || !Processed.insert(V).second)
2302         continue;
2303       if (ValueCounts[V] == 1) {
2304         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2305                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2306       } else {
2307         // Blend in all instances of this value using a VSELECT, using a
2308         // mask where each bit signals whether that element is the one
2309         // we're after.
2310         SmallVector<SDValue> Ops;
2311         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2312           return DAG.getConstant(V == V1, DL, XLenVT);
2313         });
2314         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2315                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2316                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2317       }
2318     }
2319 
2320     return Vec;
2321   }
2322 
2323   return SDValue();
2324 }
2325 
2326 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2327                                    SDValue Lo, SDValue Hi, SDValue VL,
2328                                    SelectionDAG &DAG) {
2329   bool HasPassthru = Passthru && !Passthru.isUndef();
2330   if (!HasPassthru && !Passthru)
2331     Passthru = DAG.getUNDEF(VT);
2332   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2333     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2334     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2335     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2336     // node in order to try and match RVV vector/scalar instructions.
2337     if ((LoC >> 31) == HiC)
2338       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2339 
2340     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2341     // vmv.v.x whose EEW = 32 to lower it.
2342     auto *Const = dyn_cast<ConstantSDNode>(VL);
2343     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2344       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2345       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2346       // access the subtarget here now.
2347       auto InterVec = DAG.getNode(
2348           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2349                                   DAG.getRegister(RISCV::X0, MVT::i32));
2350       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2351     }
2352   }
2353 
2354   // Fall back to a stack store and stride x0 vector load.
2355   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2356                      Hi, VL);
2357 }
2358 
2359 // Called by type legalization to handle splat of i64 on RV32.
2360 // FIXME: We can optimize this when the type has sign or zero bits in one
2361 // of the halves.
2362 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2363                                    SDValue Scalar, SDValue VL,
2364                                    SelectionDAG &DAG) {
2365   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2366   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2367                            DAG.getConstant(0, DL, MVT::i32));
2368   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2369                            DAG.getConstant(1, DL, MVT::i32));
2370   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2371 }
2372 
2373 // This function lowers a splat of a scalar operand Splat with the vector
2374 // length VL. It ensures the final sequence is type legal, which is useful when
2375 // lowering a splat after type legalization.
2376 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2377                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2378                                 const RISCVSubtarget &Subtarget) {
2379   bool HasPassthru = Passthru && !Passthru.isUndef();
2380   if (!HasPassthru && !Passthru)
2381     Passthru = DAG.getUNDEF(VT);
2382   if (VT.isFloatingPoint()) {
2383     // If VL is 1, we could use vfmv.s.f.
2384     if (isOneConstant(VL))
2385       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2386     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2387   }
2388 
2389   MVT XLenVT = Subtarget.getXLenVT();
2390 
2391   // Simplest case is that the operand needs to be promoted to XLenVT.
2392   if (Scalar.getValueType().bitsLE(XLenVT)) {
2393     // If the operand is a constant, sign extend to increase our chances
2394     // of being able to use a .vi instruction. ANY_EXTEND would become a
2395     // a zero extend and the simm5 check in isel would fail.
2396     // FIXME: Should we ignore the upper bits in isel instead?
2397     unsigned ExtOpc =
2398         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2399     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2400     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2401     // If VL is 1 and the scalar value won't benefit from immediate, we could
2402     // use vmv.s.x.
2403     if (isOneConstant(VL) &&
2404         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2405       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2406     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2407   }
2408 
2409   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2410          "Unexpected scalar for splat lowering!");
2411 
2412   if (isOneConstant(VL) && isNullConstant(Scalar))
2413     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2414                        DAG.getConstant(0, DL, XLenVT), VL);
2415 
2416   // Otherwise use the more complicated splatting algorithm.
2417   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2418 }
2419 
2420 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2421                                 const RISCVSubtarget &Subtarget) {
2422   // We need to be able to widen elements to the next larger integer type.
2423   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2424     return false;
2425 
2426   int Size = Mask.size();
2427   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2428 
2429   int Srcs[] = {-1, -1};
2430   for (int i = 0; i != Size; ++i) {
2431     // Ignore undef elements.
2432     if (Mask[i] < 0)
2433       continue;
2434 
2435     // Is this an even or odd element.
2436     int Pol = i % 2;
2437 
2438     // Ensure we consistently use the same source for this element polarity.
2439     int Src = Mask[i] / Size;
2440     if (Srcs[Pol] < 0)
2441       Srcs[Pol] = Src;
2442     if (Srcs[Pol] != Src)
2443       return false;
2444 
2445     // Make sure the element within the source is appropriate for this element
2446     // in the destination.
2447     int Elt = Mask[i] % Size;
2448     if (Elt != i / 2)
2449       return false;
2450   }
2451 
2452   // We need to find a source for each polarity and they can't be the same.
2453   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2454     return false;
2455 
2456   // Swap the sources if the second source was in the even polarity.
2457   SwapSources = Srcs[0] > Srcs[1];
2458 
2459   return true;
2460 }
2461 
2462 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2463 /// and then extract the original number of elements from the rotated result.
2464 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2465 /// returned rotation amount is for a rotate right, where elements move from
2466 /// higher elements to lower elements. \p LoSrc indicates the first source
2467 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2468 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2469 /// 0 or 1 if a rotation is found.
2470 ///
2471 /// NOTE: We talk about rotate to the right which matches how bit shift and
2472 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2473 /// and the table below write vectors with the lowest elements on the left.
2474 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2475   int Size = Mask.size();
2476 
2477   // We need to detect various ways of spelling a rotation:
2478   //   [11, 12, 13, 14, 15,  0,  1,  2]
2479   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2480   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2481   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2482   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2483   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2484   int Rotation = 0;
2485   LoSrc = -1;
2486   HiSrc = -1;
2487   for (int i = 0; i != Size; ++i) {
2488     int M = Mask[i];
2489     if (M < 0)
2490       continue;
2491 
2492     // Determine where a rotate vector would have started.
2493     int StartIdx = i - (M % Size);
2494     // The identity rotation isn't interesting, stop.
2495     if (StartIdx == 0)
2496       return -1;
2497 
2498     // If we found the tail of a vector the rotation must be the missing
2499     // front. If we found the head of a vector, it must be how much of the
2500     // head.
2501     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2502 
2503     if (Rotation == 0)
2504       Rotation = CandidateRotation;
2505     else if (Rotation != CandidateRotation)
2506       // The rotations don't match, so we can't match this mask.
2507       return -1;
2508 
2509     // Compute which value this mask is pointing at.
2510     int MaskSrc = M < Size ? 0 : 1;
2511 
2512     // Compute which of the two target values this index should be assigned to.
2513     // This reflects whether the high elements are remaining or the low elemnts
2514     // are remaining.
2515     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2516 
2517     // Either set up this value if we've not encountered it before, or check
2518     // that it remains consistent.
2519     if (TargetSrc < 0)
2520       TargetSrc = MaskSrc;
2521     else if (TargetSrc != MaskSrc)
2522       // This may be a rotation, but it pulls from the inputs in some
2523       // unsupported interleaving.
2524       return -1;
2525   }
2526 
2527   // Check that we successfully analyzed the mask, and normalize the results.
2528   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2529   assert((LoSrc >= 0 || HiSrc >= 0) &&
2530          "Failed to find a rotated input vector!");
2531 
2532   return Rotation;
2533 }
2534 
2535 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2536                                    const RISCVSubtarget &Subtarget) {
2537   SDValue V1 = Op.getOperand(0);
2538   SDValue V2 = Op.getOperand(1);
2539   SDLoc DL(Op);
2540   MVT XLenVT = Subtarget.getXLenVT();
2541   MVT VT = Op.getSimpleValueType();
2542   unsigned NumElts = VT.getVectorNumElements();
2543   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2544 
2545   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2546 
2547   SDValue TrueMask, VL;
2548   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2549 
2550   if (SVN->isSplat()) {
2551     const int Lane = SVN->getSplatIndex();
2552     if (Lane >= 0) {
2553       MVT SVT = VT.getVectorElementType();
2554 
2555       // Turn splatted vector load into a strided load with an X0 stride.
2556       SDValue V = V1;
2557       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2558       // with undef.
2559       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2560       int Offset = Lane;
2561       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2562         int OpElements =
2563             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2564         V = V.getOperand(Offset / OpElements);
2565         Offset %= OpElements;
2566       }
2567 
2568       // We need to ensure the load isn't atomic or volatile.
2569       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2570         auto *Ld = cast<LoadSDNode>(V);
2571         Offset *= SVT.getStoreSize();
2572         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2573                                                    TypeSize::Fixed(Offset), DL);
2574 
2575         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2576         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2577           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2578           SDValue IntID =
2579               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2580           SDValue Ops[] = {Ld->getChain(),
2581                            IntID,
2582                            DAG.getUNDEF(ContainerVT),
2583                            NewAddr,
2584                            DAG.getRegister(RISCV::X0, XLenVT),
2585                            VL};
2586           SDValue NewLoad = DAG.getMemIntrinsicNode(
2587               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2588               DAG.getMachineFunction().getMachineMemOperand(
2589                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2590           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2591           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2592         }
2593 
2594         // Otherwise use a scalar load and splat. This will give the best
2595         // opportunity to fold a splat into the operation. ISel can turn it into
2596         // the x0 strided load if we aren't able to fold away the select.
2597         if (SVT.isFloatingPoint())
2598           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2599                           Ld->getPointerInfo().getWithOffset(Offset),
2600                           Ld->getOriginalAlign(),
2601                           Ld->getMemOperand()->getFlags());
2602         else
2603           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2604                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2605                              Ld->getOriginalAlign(),
2606                              Ld->getMemOperand()->getFlags());
2607         DAG.makeEquivalentMemoryOrdering(Ld, V);
2608 
2609         unsigned Opc =
2610             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2611         SDValue Splat =
2612             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2613         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2614       }
2615 
2616       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2617       assert(Lane < (int)NumElts && "Unexpected lane!");
2618       SDValue Gather =
2619           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2620                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2621       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2622     }
2623   }
2624 
2625   ArrayRef<int> Mask = SVN->getMask();
2626 
2627   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2628   // be undef which can be handled with a single SLIDEDOWN/UP.
2629   int LoSrc, HiSrc;
2630   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2631   if (Rotation > 0) {
2632     SDValue LoV, HiV;
2633     if (LoSrc >= 0) {
2634       LoV = LoSrc == 0 ? V1 : V2;
2635       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2636     }
2637     if (HiSrc >= 0) {
2638       HiV = HiSrc == 0 ? V1 : V2;
2639       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2640     }
2641 
2642     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2643     // to slide LoV up by (NumElts - Rotation).
2644     unsigned InvRotate = NumElts - Rotation;
2645 
2646     SDValue Res = DAG.getUNDEF(ContainerVT);
2647     if (HiV) {
2648       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2649       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2650       // causes multiple vsetvlis in some test cases such as lowering
2651       // reduce.mul
2652       SDValue DownVL = VL;
2653       if (LoV)
2654         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2655       Res =
2656           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2657                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2658     }
2659     if (LoV)
2660       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2661                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2662 
2663     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2664   }
2665 
2666   // Detect an interleave shuffle and lower to
2667   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2668   bool SwapSources;
2669   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2670     // Swap sources if needed.
2671     if (SwapSources)
2672       std::swap(V1, V2);
2673 
2674     // Extract the lower half of the vectors.
2675     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2676     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2677                      DAG.getConstant(0, DL, XLenVT));
2678     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2679                      DAG.getConstant(0, DL, XLenVT));
2680 
2681     // Double the element width and halve the number of elements in an int type.
2682     unsigned EltBits = VT.getScalarSizeInBits();
2683     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2684     MVT WideIntVT =
2685         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2686     // Convert this to a scalable vector. We need to base this on the
2687     // destination size to ensure there's always a type with a smaller LMUL.
2688     MVT WideIntContainerVT =
2689         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2690 
2691     // Convert sources to scalable vectors with the same element count as the
2692     // larger type.
2693     MVT HalfContainerVT = MVT::getVectorVT(
2694         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2695     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2696     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2697 
2698     // Cast sources to integer.
2699     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2700     MVT IntHalfVT =
2701         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2702     V1 = DAG.getBitcast(IntHalfVT, V1);
2703     V2 = DAG.getBitcast(IntHalfVT, V2);
2704 
2705     // Freeze V2 since we use it twice and we need to be sure that the add and
2706     // multiply see the same value.
2707     V2 = DAG.getFreeze(V2);
2708 
2709     // Recreate TrueMask using the widened type's element count.
2710     MVT MaskVT =
2711         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2712     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2713 
2714     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2715     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2716                               V2, TrueMask, VL);
2717     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2718     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2719                                      DAG.getUNDEF(IntHalfVT),
2720                                      DAG.getAllOnesConstant(DL, XLenVT));
2721     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2722                                    V2, Multiplier, TrueMask, VL);
2723     // Add the new copies to our previous addition giving us 2^eltbits copies of
2724     // V2. This is equivalent to shifting V2 left by eltbits. This should
2725     // combine with the vwmulu.vv above to form vwmaccu.vv.
2726     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2727                       TrueMask, VL);
2728     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2729     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2730     // vector VT.
2731     ContainerVT =
2732         MVT::getVectorVT(VT.getVectorElementType(),
2733                          WideIntContainerVT.getVectorElementCount() * 2);
2734     Add = DAG.getBitcast(ContainerVT, Add);
2735     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2736   }
2737 
2738   // Detect shuffles which can be re-expressed as vector selects; these are
2739   // shuffles in which each element in the destination is taken from an element
2740   // at the corresponding index in either source vectors.
2741   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2742     int MaskIndex = MaskIdx.value();
2743     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2744   });
2745 
2746   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2747 
2748   SmallVector<SDValue> MaskVals;
2749   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2750   // merged with a second vrgather.
2751   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2752 
2753   // By default we preserve the original operand order, and use a mask to
2754   // select LHS as true and RHS as false. However, since RVV vector selects may
2755   // feature splats but only on the LHS, we may choose to invert our mask and
2756   // instead select between RHS and LHS.
2757   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2758   bool InvertMask = IsSelect == SwapOps;
2759 
2760   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2761   // half.
2762   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2763 
2764   // Now construct the mask that will be used by the vselect or blended
2765   // vrgather operation. For vrgathers, construct the appropriate indices into
2766   // each vector.
2767   for (int MaskIndex : Mask) {
2768     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2769     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2770     if (!IsSelect) {
2771       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2772       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2773                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2774                                      : DAG.getUNDEF(XLenVT));
2775       GatherIndicesRHS.push_back(
2776           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2777                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2778       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2779         ++LHSIndexCounts[MaskIndex];
2780       if (!IsLHSOrUndefIndex)
2781         ++RHSIndexCounts[MaskIndex - NumElts];
2782     }
2783   }
2784 
2785   if (SwapOps) {
2786     std::swap(V1, V2);
2787     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2788   }
2789 
2790   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2791   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2792   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2793 
2794   if (IsSelect)
2795     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2796 
2797   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2798     // On such a large vector we're unable to use i8 as the index type.
2799     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2800     // may involve vector splitting if we're already at LMUL=8, or our
2801     // user-supplied maximum fixed-length LMUL.
2802     return SDValue();
2803   }
2804 
2805   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2806   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2807   MVT IndexVT = VT.changeTypeToInteger();
2808   // Since we can't introduce illegal index types at this stage, use i16 and
2809   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2810   // than XLenVT.
2811   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2812     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2813     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2814   }
2815 
2816   MVT IndexContainerVT =
2817       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2818 
2819   SDValue Gather;
2820   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2821   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2822   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2823     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2824                               Subtarget);
2825   } else {
2826     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2827     // If only one index is used, we can use a "splat" vrgather.
2828     // TODO: We can splat the most-common index and fix-up any stragglers, if
2829     // that's beneficial.
2830     if (LHSIndexCounts.size() == 1) {
2831       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2832       Gather =
2833           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2834                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2835     } else {
2836       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2837       LHSIndices =
2838           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2839 
2840       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2841                            TrueMask, VL);
2842     }
2843   }
2844 
2845   // If a second vector operand is used by this shuffle, blend it in with an
2846   // additional vrgather.
2847   if (!V2.isUndef()) {
2848     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2849     // If only one index is used, we can use a "splat" vrgather.
2850     // TODO: We can splat the most-common index and fix-up any stragglers, if
2851     // that's beneficial.
2852     if (RHSIndexCounts.size() == 1) {
2853       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2854       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2855                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2856     } else {
2857       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2858       RHSIndices =
2859           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2860       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2861                        VL);
2862     }
2863 
2864     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2865     SelectMask =
2866         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2867 
2868     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2869                          Gather, VL);
2870   }
2871 
2872   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2873 }
2874 
2875 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2876   // Support splats for any type. These should type legalize well.
2877   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2878     return true;
2879 
2880   // Only support legal VTs for other shuffles for now.
2881   if (!isTypeLegal(VT))
2882     return false;
2883 
2884   MVT SVT = VT.getSimpleVT();
2885 
2886   bool SwapSources;
2887   int LoSrc, HiSrc;
2888   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2889          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2890 }
2891 
2892 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2893                                      SDLoc DL, SelectionDAG &DAG,
2894                                      const RISCVSubtarget &Subtarget) {
2895   if (VT.isScalableVector())
2896     return DAG.getFPExtendOrRound(Op, DL, VT);
2897   assert(VT.isFixedLengthVector() &&
2898          "Unexpected value type for RVV FP extend/round lowering");
2899   SDValue Mask, VL;
2900   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2901   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2902                         ? RISCVISD::FP_EXTEND_VL
2903                         : RISCVISD::FP_ROUND_VL;
2904   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2905 }
2906 
2907 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2908 // the exponent.
2909 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2910   MVT VT = Op.getSimpleValueType();
2911   unsigned EltSize = VT.getScalarSizeInBits();
2912   SDValue Src = Op.getOperand(0);
2913   SDLoc DL(Op);
2914 
2915   // We need a FP type that can represent the value.
2916   // TODO: Use f16 for i8 when possible?
2917   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2918   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2919 
2920   // Legal types should have been checked in the RISCVTargetLowering
2921   // constructor.
2922   // TODO: Splitting may make sense in some cases.
2923   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2924          "Expected legal float type!");
2925 
2926   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2927   // The trailing zero count is equal to log2 of this single bit value.
2928   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2929     SDValue Neg =
2930         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2931     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2932   }
2933 
2934   // We have a legal FP type, convert to it.
2935   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2936   // Bitcast to integer and shift the exponent to the LSB.
2937   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2938   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2939   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2940   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2941                               DAG.getConstant(ShiftAmt, DL, IntVT));
2942   // Truncate back to original type to allow vnsrl.
2943   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2944   // The exponent contains log2 of the value in biased form.
2945   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2946 
2947   // For trailing zeros, we just need to subtract the bias.
2948   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2949     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2950                        DAG.getConstant(ExponentBias, DL, VT));
2951 
2952   // For leading zeros, we need to remove the bias and convert from log2 to
2953   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2954   unsigned Adjust = ExponentBias + (EltSize - 1);
2955   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2956 }
2957 
2958 // While RVV has alignment restrictions, we should always be able to load as a
2959 // legal equivalently-sized byte-typed vector instead. This method is
2960 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2961 // the load is already correctly-aligned, it returns SDValue().
2962 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2963                                                     SelectionDAG &DAG) const {
2964   auto *Load = cast<LoadSDNode>(Op);
2965   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2966 
2967   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2968                                      Load->getMemoryVT(),
2969                                      *Load->getMemOperand()))
2970     return SDValue();
2971 
2972   SDLoc DL(Op);
2973   MVT VT = Op.getSimpleValueType();
2974   unsigned EltSizeBits = VT.getScalarSizeInBits();
2975   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2976          "Unexpected unaligned RVV load type");
2977   MVT NewVT =
2978       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2979   assert(NewVT.isValid() &&
2980          "Expecting equally-sized RVV vector types to be legal");
2981   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2982                           Load->getPointerInfo(), Load->getOriginalAlign(),
2983                           Load->getMemOperand()->getFlags());
2984   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2985 }
2986 
2987 // While RVV has alignment restrictions, we should always be able to store as a
2988 // legal equivalently-sized byte-typed vector instead. This method is
2989 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2990 // returns SDValue() if the store is already correctly aligned.
2991 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2992                                                      SelectionDAG &DAG) const {
2993   auto *Store = cast<StoreSDNode>(Op);
2994   assert(Store && Store->getValue().getValueType().isVector() &&
2995          "Expected vector store");
2996 
2997   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2998                                      Store->getMemoryVT(),
2999                                      *Store->getMemOperand()))
3000     return SDValue();
3001 
3002   SDLoc DL(Op);
3003   SDValue StoredVal = Store->getValue();
3004   MVT VT = StoredVal.getSimpleValueType();
3005   unsigned EltSizeBits = VT.getScalarSizeInBits();
3006   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3007          "Unexpected unaligned RVV store type");
3008   MVT NewVT =
3009       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3010   assert(NewVT.isValid() &&
3011          "Expecting equally-sized RVV vector types to be legal");
3012   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3013   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3014                       Store->getPointerInfo(), Store->getOriginalAlign(),
3015                       Store->getMemOperand()->getFlags());
3016 }
3017 
3018 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3019                                             SelectionDAG &DAG) const {
3020   switch (Op.getOpcode()) {
3021   default:
3022     report_fatal_error("unimplemented operand");
3023   case ISD::GlobalAddress:
3024     return lowerGlobalAddress(Op, DAG);
3025   case ISD::BlockAddress:
3026     return lowerBlockAddress(Op, DAG);
3027   case ISD::ConstantPool:
3028     return lowerConstantPool(Op, DAG);
3029   case ISD::JumpTable:
3030     return lowerJumpTable(Op, DAG);
3031   case ISD::GlobalTLSAddress:
3032     return lowerGlobalTLSAddress(Op, DAG);
3033   case ISD::SELECT:
3034     return lowerSELECT(Op, DAG);
3035   case ISD::BRCOND:
3036     return lowerBRCOND(Op, DAG);
3037   case ISD::VASTART:
3038     return lowerVASTART(Op, DAG);
3039   case ISD::FRAMEADDR:
3040     return lowerFRAMEADDR(Op, DAG);
3041   case ISD::RETURNADDR:
3042     return lowerRETURNADDR(Op, DAG);
3043   case ISD::SHL_PARTS:
3044     return lowerShiftLeftParts(Op, DAG);
3045   case ISD::SRA_PARTS:
3046     return lowerShiftRightParts(Op, DAG, true);
3047   case ISD::SRL_PARTS:
3048     return lowerShiftRightParts(Op, DAG, false);
3049   case ISD::BITCAST: {
3050     SDLoc DL(Op);
3051     EVT VT = Op.getValueType();
3052     SDValue Op0 = Op.getOperand(0);
3053     EVT Op0VT = Op0.getValueType();
3054     MVT XLenVT = Subtarget.getXLenVT();
3055     if (VT.isFixedLengthVector()) {
3056       // We can handle fixed length vector bitcasts with a simple replacement
3057       // in isel.
3058       if (Op0VT.isFixedLengthVector())
3059         return Op;
3060       // When bitcasting from scalar to fixed-length vector, insert the scalar
3061       // into a one-element vector of the result type, and perform a vector
3062       // bitcast.
3063       if (!Op0VT.isVector()) {
3064         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3065         if (!isTypeLegal(BVT))
3066           return SDValue();
3067         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3068                                               DAG.getUNDEF(BVT), Op0,
3069                                               DAG.getConstant(0, DL, XLenVT)));
3070       }
3071       return SDValue();
3072     }
3073     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3074     // thus: bitcast the vector to a one-element vector type whose element type
3075     // is the same as the result type, and extract the first element.
3076     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3077       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3078       if (!isTypeLegal(BVT))
3079         return SDValue();
3080       SDValue BVec = DAG.getBitcast(BVT, Op0);
3081       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3082                          DAG.getConstant(0, DL, XLenVT));
3083     }
3084     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3085       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3086       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3087       return FPConv;
3088     }
3089     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3090         Subtarget.hasStdExtF()) {
3091       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3092       SDValue FPConv =
3093           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3094       return FPConv;
3095     }
3096     return SDValue();
3097   }
3098   case ISD::INTRINSIC_WO_CHAIN:
3099     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3100   case ISD::INTRINSIC_W_CHAIN:
3101     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3102   case ISD::INTRINSIC_VOID:
3103     return LowerINTRINSIC_VOID(Op, DAG);
3104   case ISD::BSWAP:
3105   case ISD::BITREVERSE: {
3106     MVT VT = Op.getSimpleValueType();
3107     SDLoc DL(Op);
3108     if (Subtarget.hasStdExtZbp()) {
3109       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3110       // Start with the maximum immediate value which is the bitwidth - 1.
3111       unsigned Imm = VT.getSizeInBits() - 1;
3112       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3113       if (Op.getOpcode() == ISD::BSWAP)
3114         Imm &= ~0x7U;
3115       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3116                          DAG.getConstant(Imm, DL, VT));
3117     }
3118     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3119     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3120     // Expand bitreverse to a bswap(rev8) followed by brev8.
3121     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3122     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3123     // as brev8 by an isel pattern.
3124     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3125                        DAG.getConstant(7, DL, VT));
3126   }
3127   case ISD::FSHL:
3128   case ISD::FSHR: {
3129     MVT VT = Op.getSimpleValueType();
3130     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3131     SDLoc DL(Op);
3132     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3133     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3134     // accidentally setting the extra bit.
3135     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3136     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3137                                 DAG.getConstant(ShAmtWidth, DL, VT));
3138     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3139     // instruction use different orders. fshl will return its first operand for
3140     // shift of zero, fshr will return its second operand. fsl and fsr both
3141     // return rs1 so the ISD nodes need to have different operand orders.
3142     // Shift amount is in rs2.
3143     SDValue Op0 = Op.getOperand(0);
3144     SDValue Op1 = Op.getOperand(1);
3145     unsigned Opc = RISCVISD::FSL;
3146     if (Op.getOpcode() == ISD::FSHR) {
3147       std::swap(Op0, Op1);
3148       Opc = RISCVISD::FSR;
3149     }
3150     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3151   }
3152   case ISD::TRUNCATE: {
3153     SDLoc DL(Op);
3154     MVT VT = Op.getSimpleValueType();
3155     // Only custom-lower vector truncates
3156     if (!VT.isVector())
3157       return Op;
3158 
3159     // Truncates to mask types are handled differently
3160     if (VT.getVectorElementType() == MVT::i1)
3161       return lowerVectorMaskTrunc(Op, DAG);
3162 
3163     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3164     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3165     // truncate by one power of two at a time.
3166     MVT DstEltVT = VT.getVectorElementType();
3167 
3168     SDValue Src = Op.getOperand(0);
3169     MVT SrcVT = Src.getSimpleValueType();
3170     MVT SrcEltVT = SrcVT.getVectorElementType();
3171 
3172     assert(DstEltVT.bitsLT(SrcEltVT) &&
3173            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3174            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3175            "Unexpected vector truncate lowering");
3176 
3177     MVT ContainerVT = SrcVT;
3178     if (SrcVT.isFixedLengthVector()) {
3179       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3180       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3181     }
3182 
3183     SDValue Result = Src;
3184     SDValue Mask, VL;
3185     std::tie(Mask, VL) =
3186         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3187     LLVMContext &Context = *DAG.getContext();
3188     const ElementCount Count = ContainerVT.getVectorElementCount();
3189     do {
3190       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3191       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3192       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3193                            Mask, VL);
3194     } while (SrcEltVT != DstEltVT);
3195 
3196     if (SrcVT.isFixedLengthVector())
3197       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3198 
3199     return Result;
3200   }
3201   case ISD::ANY_EXTEND:
3202   case ISD::ZERO_EXTEND:
3203     if (Op.getOperand(0).getValueType().isVector() &&
3204         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3205       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3206     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3207   case ISD::SIGN_EXTEND:
3208     if (Op.getOperand(0).getValueType().isVector() &&
3209         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3210       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3211     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3212   case ISD::SPLAT_VECTOR_PARTS:
3213     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3214   case ISD::INSERT_VECTOR_ELT:
3215     return lowerINSERT_VECTOR_ELT(Op, DAG);
3216   case ISD::EXTRACT_VECTOR_ELT:
3217     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3218   case ISD::VSCALE: {
3219     MVT VT = Op.getSimpleValueType();
3220     SDLoc DL(Op);
3221     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3222     // We define our scalable vector types for lmul=1 to use a 64 bit known
3223     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3224     // vscale as VLENB / 8.
3225     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3226     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3227       report_fatal_error("Support for VLEN==32 is incomplete.");
3228     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3229       // We assume VLENB is a multiple of 8. We manually choose the best shift
3230       // here because SimplifyDemandedBits isn't always able to simplify it.
3231       uint64_t Val = Op.getConstantOperandVal(0);
3232       if (isPowerOf2_64(Val)) {
3233         uint64_t Log2 = Log2_64(Val);
3234         if (Log2 < 3)
3235           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3236                              DAG.getConstant(3 - Log2, DL, VT));
3237         if (Log2 > 3)
3238           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3239                              DAG.getConstant(Log2 - 3, DL, VT));
3240         return VLENB;
3241       }
3242       // If the multiplier is a multiple of 8, scale it down to avoid needing
3243       // to shift the VLENB value.
3244       if ((Val % 8) == 0)
3245         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3246                            DAG.getConstant(Val / 8, DL, VT));
3247     }
3248 
3249     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3250                                  DAG.getConstant(3, DL, VT));
3251     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3252   }
3253   case ISD::FPOWI: {
3254     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3255     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3256     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3257         Op.getOperand(1).getValueType() == MVT::i32) {
3258       SDLoc DL(Op);
3259       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3260       SDValue Powi =
3261           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3262       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3263                          DAG.getIntPtrConstant(0, DL));
3264     }
3265     return SDValue();
3266   }
3267   case ISD::FP_EXTEND: {
3268     // RVV can only do fp_extend to types double the size as the source. We
3269     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3270     // via f32.
3271     SDLoc DL(Op);
3272     MVT VT = Op.getSimpleValueType();
3273     SDValue Src = Op.getOperand(0);
3274     MVT SrcVT = Src.getSimpleValueType();
3275 
3276     // Prepare any fixed-length vector operands.
3277     MVT ContainerVT = VT;
3278     if (SrcVT.isFixedLengthVector()) {
3279       ContainerVT = getContainerForFixedLengthVector(VT);
3280       MVT SrcContainerVT =
3281           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3282       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3283     }
3284 
3285     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3286         SrcVT.getVectorElementType() != MVT::f16) {
3287       // For scalable vectors, we only need to close the gap between
3288       // vXf16->vXf64.
3289       if (!VT.isFixedLengthVector())
3290         return Op;
3291       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3292       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3293       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3294     }
3295 
3296     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3297     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3298     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3299         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3300 
3301     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3302                                            DL, DAG, Subtarget);
3303     if (VT.isFixedLengthVector())
3304       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3305     return Extend;
3306   }
3307   case ISD::FP_ROUND: {
3308     // RVV can only do fp_round to types half the size as the source. We
3309     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3310     // conversion instruction.
3311     SDLoc DL(Op);
3312     MVT VT = Op.getSimpleValueType();
3313     SDValue Src = Op.getOperand(0);
3314     MVT SrcVT = Src.getSimpleValueType();
3315 
3316     // Prepare any fixed-length vector operands.
3317     MVT ContainerVT = VT;
3318     if (VT.isFixedLengthVector()) {
3319       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3320       ContainerVT =
3321           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3322       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3323     }
3324 
3325     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3326         SrcVT.getVectorElementType() != MVT::f64) {
3327       // For scalable vectors, we only need to close the gap between
3328       // vXf64<->vXf16.
3329       if (!VT.isFixedLengthVector())
3330         return Op;
3331       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3332       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3333       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3334     }
3335 
3336     SDValue Mask, VL;
3337     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3338 
3339     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3340     SDValue IntermediateRound =
3341         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3342     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3343                                           DL, DAG, Subtarget);
3344 
3345     if (VT.isFixedLengthVector())
3346       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3347     return Round;
3348   }
3349   case ISD::FP_TO_SINT:
3350   case ISD::FP_TO_UINT:
3351   case ISD::SINT_TO_FP:
3352   case ISD::UINT_TO_FP: {
3353     // RVV can only do fp<->int conversions to types half/double the size as
3354     // the source. We custom-lower any conversions that do two hops into
3355     // sequences.
3356     MVT VT = Op.getSimpleValueType();
3357     if (!VT.isVector())
3358       return Op;
3359     SDLoc DL(Op);
3360     SDValue Src = Op.getOperand(0);
3361     MVT EltVT = VT.getVectorElementType();
3362     MVT SrcVT = Src.getSimpleValueType();
3363     MVT SrcEltVT = SrcVT.getVectorElementType();
3364     unsigned EltSize = EltVT.getSizeInBits();
3365     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3366     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3367            "Unexpected vector element types");
3368 
3369     bool IsInt2FP = SrcEltVT.isInteger();
3370     // Widening conversions
3371     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3372       if (IsInt2FP) {
3373         // Do a regular integer sign/zero extension then convert to float.
3374         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3375                                       VT.getVectorElementCount());
3376         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3377                                  ? ISD::ZERO_EXTEND
3378                                  : ISD::SIGN_EXTEND;
3379         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3380         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3381       }
3382       // FP2Int
3383       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3384       // Do one doubling fp_extend then complete the operation by converting
3385       // to int.
3386       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3387       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3388       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3389     }
3390 
3391     // Narrowing conversions
3392     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3393       if (IsInt2FP) {
3394         // One narrowing int_to_fp, then an fp_round.
3395         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3396         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3397         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3398         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3399       }
3400       // FP2Int
3401       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3402       // representable by the integer, the result is poison.
3403       MVT IVecVT =
3404           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3405                            VT.getVectorElementCount());
3406       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3407       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3408     }
3409 
3410     // Scalable vectors can exit here. Patterns will handle equally-sized
3411     // conversions halving/doubling ones.
3412     if (!VT.isFixedLengthVector())
3413       return Op;
3414 
3415     // For fixed-length vectors we lower to a custom "VL" node.
3416     unsigned RVVOpc = 0;
3417     switch (Op.getOpcode()) {
3418     default:
3419       llvm_unreachable("Impossible opcode");
3420     case ISD::FP_TO_SINT:
3421       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3422       break;
3423     case ISD::FP_TO_UINT:
3424       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3425       break;
3426     case ISD::SINT_TO_FP:
3427       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3428       break;
3429     case ISD::UINT_TO_FP:
3430       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3431       break;
3432     }
3433 
3434     MVT ContainerVT, SrcContainerVT;
3435     // Derive the reference container type from the larger vector type.
3436     if (SrcEltSize > EltSize) {
3437       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3438       ContainerVT =
3439           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3440     } else {
3441       ContainerVT = getContainerForFixedLengthVector(VT);
3442       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3443     }
3444 
3445     SDValue Mask, VL;
3446     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3447 
3448     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3449     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3450     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3451   }
3452   case ISD::FP_TO_SINT_SAT:
3453   case ISD::FP_TO_UINT_SAT:
3454     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3455   case ISD::FTRUNC:
3456   case ISD::FCEIL:
3457   case ISD::FFLOOR:
3458     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3459   case ISD::FROUND:
3460     return lowerFROUND(Op, DAG);
3461   case ISD::VECREDUCE_ADD:
3462   case ISD::VECREDUCE_UMAX:
3463   case ISD::VECREDUCE_SMAX:
3464   case ISD::VECREDUCE_UMIN:
3465   case ISD::VECREDUCE_SMIN:
3466     return lowerVECREDUCE(Op, DAG);
3467   case ISD::VECREDUCE_AND:
3468   case ISD::VECREDUCE_OR:
3469   case ISD::VECREDUCE_XOR:
3470     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3471       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3472     return lowerVECREDUCE(Op, DAG);
3473   case ISD::VECREDUCE_FADD:
3474   case ISD::VECREDUCE_SEQ_FADD:
3475   case ISD::VECREDUCE_FMIN:
3476   case ISD::VECREDUCE_FMAX:
3477     return lowerFPVECREDUCE(Op, DAG);
3478   case ISD::VP_REDUCE_ADD:
3479   case ISD::VP_REDUCE_UMAX:
3480   case ISD::VP_REDUCE_SMAX:
3481   case ISD::VP_REDUCE_UMIN:
3482   case ISD::VP_REDUCE_SMIN:
3483   case ISD::VP_REDUCE_FADD:
3484   case ISD::VP_REDUCE_SEQ_FADD:
3485   case ISD::VP_REDUCE_FMIN:
3486   case ISD::VP_REDUCE_FMAX:
3487     return lowerVPREDUCE(Op, DAG);
3488   case ISD::VP_REDUCE_AND:
3489   case ISD::VP_REDUCE_OR:
3490   case ISD::VP_REDUCE_XOR:
3491     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3492       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3493     return lowerVPREDUCE(Op, DAG);
3494   case ISD::INSERT_SUBVECTOR:
3495     return lowerINSERT_SUBVECTOR(Op, DAG);
3496   case ISD::EXTRACT_SUBVECTOR:
3497     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3498   case ISD::STEP_VECTOR:
3499     return lowerSTEP_VECTOR(Op, DAG);
3500   case ISD::VECTOR_REVERSE:
3501     return lowerVECTOR_REVERSE(Op, DAG);
3502   case ISD::BUILD_VECTOR:
3503     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3504   case ISD::SPLAT_VECTOR:
3505     if (Op.getValueType().getVectorElementType() == MVT::i1)
3506       return lowerVectorMaskSplat(Op, DAG);
3507     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3508   case ISD::VECTOR_SHUFFLE:
3509     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3510   case ISD::CONCAT_VECTORS: {
3511     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3512     // better than going through the stack, as the default expansion does.
3513     SDLoc DL(Op);
3514     MVT VT = Op.getSimpleValueType();
3515     unsigned NumOpElts =
3516         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3517     SDValue Vec = DAG.getUNDEF(VT);
3518     for (const auto &OpIdx : enumerate(Op->ops())) {
3519       SDValue SubVec = OpIdx.value();
3520       // Don't insert undef subvectors.
3521       if (SubVec.isUndef())
3522         continue;
3523       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3524                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3525     }
3526     return Vec;
3527   }
3528   case ISD::LOAD:
3529     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3530       return V;
3531     if (Op.getValueType().isFixedLengthVector())
3532       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3533     return Op;
3534   case ISD::STORE:
3535     if (auto V = expandUnalignedRVVStore(Op, DAG))
3536       return V;
3537     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3538       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3539     return Op;
3540   case ISD::MLOAD:
3541   case ISD::VP_LOAD:
3542     return lowerMaskedLoad(Op, DAG);
3543   case ISD::MSTORE:
3544   case ISD::VP_STORE:
3545     return lowerMaskedStore(Op, DAG);
3546   case ISD::SETCC:
3547     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3548   case ISD::ADD:
3549     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3550   case ISD::SUB:
3551     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3552   case ISD::MUL:
3553     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3554   case ISD::MULHS:
3555     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3556   case ISD::MULHU:
3557     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3558   case ISD::AND:
3559     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3560                                               RISCVISD::AND_VL);
3561   case ISD::OR:
3562     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3563                                               RISCVISD::OR_VL);
3564   case ISD::XOR:
3565     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3566                                               RISCVISD::XOR_VL);
3567   case ISD::SDIV:
3568     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3569   case ISD::SREM:
3570     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3571   case ISD::UDIV:
3572     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3573   case ISD::UREM:
3574     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3575   case ISD::SHL:
3576   case ISD::SRA:
3577   case ISD::SRL:
3578     if (Op.getSimpleValueType().isFixedLengthVector())
3579       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3580     // This can be called for an i32 shift amount that needs to be promoted.
3581     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3582            "Unexpected custom legalisation");
3583     return SDValue();
3584   case ISD::SADDSAT:
3585     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3586   case ISD::UADDSAT:
3587     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3588   case ISD::SSUBSAT:
3589     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3590   case ISD::USUBSAT:
3591     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3592   case ISD::FADD:
3593     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3594   case ISD::FSUB:
3595     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3596   case ISD::FMUL:
3597     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3598   case ISD::FDIV:
3599     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3600   case ISD::FNEG:
3601     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3602   case ISD::FABS:
3603     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3604   case ISD::FSQRT:
3605     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3606   case ISD::FMA:
3607     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3608   case ISD::SMIN:
3609     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3610   case ISD::SMAX:
3611     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3612   case ISD::UMIN:
3613     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3614   case ISD::UMAX:
3615     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3616   case ISD::FMINNUM:
3617     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3618   case ISD::FMAXNUM:
3619     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3620   case ISD::ABS:
3621     return lowerABS(Op, DAG);
3622   case ISD::CTLZ_ZERO_UNDEF:
3623   case ISD::CTTZ_ZERO_UNDEF:
3624     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3625   case ISD::VSELECT:
3626     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3627   case ISD::FCOPYSIGN:
3628     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3629   case ISD::MGATHER:
3630   case ISD::VP_GATHER:
3631     return lowerMaskedGather(Op, DAG);
3632   case ISD::MSCATTER:
3633   case ISD::VP_SCATTER:
3634     return lowerMaskedScatter(Op, DAG);
3635   case ISD::FLT_ROUNDS_:
3636     return lowerGET_ROUNDING(Op, DAG);
3637   case ISD::SET_ROUNDING:
3638     return lowerSET_ROUNDING(Op, DAG);
3639   case ISD::VP_SELECT:
3640     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3641   case ISD::VP_MERGE:
3642     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3643   case ISD::VP_ADD:
3644     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3645   case ISD::VP_SUB:
3646     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3647   case ISD::VP_MUL:
3648     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3649   case ISD::VP_SDIV:
3650     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3651   case ISD::VP_UDIV:
3652     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3653   case ISD::VP_SREM:
3654     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3655   case ISD::VP_UREM:
3656     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3657   case ISD::VP_AND:
3658     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3659   case ISD::VP_OR:
3660     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3661   case ISD::VP_XOR:
3662     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3663   case ISD::VP_ASHR:
3664     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3665   case ISD::VP_LSHR:
3666     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3667   case ISD::VP_SHL:
3668     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3669   case ISD::VP_FADD:
3670     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3671   case ISD::VP_FSUB:
3672     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3673   case ISD::VP_FMUL:
3674     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3675   case ISD::VP_FDIV:
3676     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3677   case ISD::VP_FNEG:
3678     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3679   case ISD::VP_FMA:
3680     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3681   }
3682 }
3683 
3684 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3685                              SelectionDAG &DAG, unsigned Flags) {
3686   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3687 }
3688 
3689 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3690                              SelectionDAG &DAG, unsigned Flags) {
3691   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3692                                    Flags);
3693 }
3694 
3695 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3696                              SelectionDAG &DAG, unsigned Flags) {
3697   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3698                                    N->getOffset(), Flags);
3699 }
3700 
3701 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3702                              SelectionDAG &DAG, unsigned Flags) {
3703   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3704 }
3705 
3706 template <class NodeTy>
3707 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3708                                      bool IsLocal) const {
3709   SDLoc DL(N);
3710   EVT Ty = getPointerTy(DAG.getDataLayout());
3711 
3712   if (isPositionIndependent()) {
3713     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3714     if (IsLocal)
3715       // Use PC-relative addressing to access the symbol. This generates the
3716       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3717       // %pcrel_lo(auipc)).
3718       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3719 
3720     // Use PC-relative addressing to access the GOT for this symbol, then load
3721     // the address from the GOT. This generates the pattern (PseudoLA sym),
3722     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3723     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3724   }
3725 
3726   switch (getTargetMachine().getCodeModel()) {
3727   default:
3728     report_fatal_error("Unsupported code model for lowering");
3729   case CodeModel::Small: {
3730     // Generate a sequence for accessing addresses within the first 2 GiB of
3731     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3732     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3733     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3734     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3735     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3736   }
3737   case CodeModel::Medium: {
3738     // Generate a sequence for accessing addresses within any 2GiB range within
3739     // the address space. This generates the pattern (PseudoLLA sym), which
3740     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3741     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3742     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3743   }
3744   }
3745 }
3746 
3747 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3748                                                 SelectionDAG &DAG) const {
3749   SDLoc DL(Op);
3750   EVT Ty = Op.getValueType();
3751   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3752   int64_t Offset = N->getOffset();
3753   MVT XLenVT = Subtarget.getXLenVT();
3754 
3755   const GlobalValue *GV = N->getGlobal();
3756   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3757   SDValue Addr = getAddr(N, DAG, IsLocal);
3758 
3759   // In order to maximise the opportunity for common subexpression elimination,
3760   // emit a separate ADD node for the global address offset instead of folding
3761   // it in the global address node. Later peephole optimisations may choose to
3762   // fold it back in when profitable.
3763   if (Offset != 0)
3764     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3765                        DAG.getConstant(Offset, DL, XLenVT));
3766   return Addr;
3767 }
3768 
3769 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3770                                                SelectionDAG &DAG) const {
3771   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3772 
3773   return getAddr(N, DAG);
3774 }
3775 
3776 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3777                                                SelectionDAG &DAG) const {
3778   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3779 
3780   return getAddr(N, DAG);
3781 }
3782 
3783 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3784                                             SelectionDAG &DAG) const {
3785   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3786 
3787   return getAddr(N, DAG);
3788 }
3789 
3790 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3791                                               SelectionDAG &DAG,
3792                                               bool UseGOT) const {
3793   SDLoc DL(N);
3794   EVT Ty = getPointerTy(DAG.getDataLayout());
3795   const GlobalValue *GV = N->getGlobal();
3796   MVT XLenVT = Subtarget.getXLenVT();
3797 
3798   if (UseGOT) {
3799     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3800     // load the address from the GOT and add the thread pointer. This generates
3801     // the pattern (PseudoLA_TLS_IE sym), which expands to
3802     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3803     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3804     SDValue Load =
3805         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3806 
3807     // Add the thread pointer.
3808     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3809     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3810   }
3811 
3812   // Generate a sequence for accessing the address relative to the thread
3813   // pointer, with the appropriate adjustment for the thread pointer offset.
3814   // This generates the pattern
3815   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3816   SDValue AddrHi =
3817       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3818   SDValue AddrAdd =
3819       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3820   SDValue AddrLo =
3821       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3822 
3823   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3824   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3825   SDValue MNAdd = SDValue(
3826       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3827       0);
3828   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3829 }
3830 
3831 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3832                                                SelectionDAG &DAG) const {
3833   SDLoc DL(N);
3834   EVT Ty = getPointerTy(DAG.getDataLayout());
3835   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3836   const GlobalValue *GV = N->getGlobal();
3837 
3838   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3839   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3840   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3841   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3842   SDValue Load =
3843       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3844 
3845   // Prepare argument list to generate call.
3846   ArgListTy Args;
3847   ArgListEntry Entry;
3848   Entry.Node = Load;
3849   Entry.Ty = CallTy;
3850   Args.push_back(Entry);
3851 
3852   // Setup call to __tls_get_addr.
3853   TargetLowering::CallLoweringInfo CLI(DAG);
3854   CLI.setDebugLoc(DL)
3855       .setChain(DAG.getEntryNode())
3856       .setLibCallee(CallingConv::C, CallTy,
3857                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3858                     std::move(Args));
3859 
3860   return LowerCallTo(CLI).first;
3861 }
3862 
3863 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3864                                                    SelectionDAG &DAG) const {
3865   SDLoc DL(Op);
3866   EVT Ty = Op.getValueType();
3867   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3868   int64_t Offset = N->getOffset();
3869   MVT XLenVT = Subtarget.getXLenVT();
3870 
3871   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3872 
3873   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3874       CallingConv::GHC)
3875     report_fatal_error("In GHC calling convention TLS is not supported");
3876 
3877   SDValue Addr;
3878   switch (Model) {
3879   case TLSModel::LocalExec:
3880     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3881     break;
3882   case TLSModel::InitialExec:
3883     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3884     break;
3885   case TLSModel::LocalDynamic:
3886   case TLSModel::GeneralDynamic:
3887     Addr = getDynamicTLSAddr(N, DAG);
3888     break;
3889   }
3890 
3891   // In order to maximise the opportunity for common subexpression elimination,
3892   // emit a separate ADD node for the global address offset instead of folding
3893   // it in the global address node. Later peephole optimisations may choose to
3894   // fold it back in when profitable.
3895   if (Offset != 0)
3896     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3897                        DAG.getConstant(Offset, DL, XLenVT));
3898   return Addr;
3899 }
3900 
3901 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3902   SDValue CondV = Op.getOperand(0);
3903   SDValue TrueV = Op.getOperand(1);
3904   SDValue FalseV = Op.getOperand(2);
3905   SDLoc DL(Op);
3906   MVT VT = Op.getSimpleValueType();
3907   MVT XLenVT = Subtarget.getXLenVT();
3908 
3909   // Lower vector SELECTs to VSELECTs by splatting the condition.
3910   if (VT.isVector()) {
3911     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3912     SDValue CondSplat = VT.isScalableVector()
3913                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3914                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3915     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3916   }
3917 
3918   // If the result type is XLenVT and CondV is the output of a SETCC node
3919   // which also operated on XLenVT inputs, then merge the SETCC node into the
3920   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3921   // compare+branch instructions. i.e.:
3922   // (select (setcc lhs, rhs, cc), truev, falsev)
3923   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3924   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3925       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3926     SDValue LHS = CondV.getOperand(0);
3927     SDValue RHS = CondV.getOperand(1);
3928     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3929     ISD::CondCode CCVal = CC->get();
3930 
3931     // Special case for a select of 2 constants that have a diffence of 1.
3932     // Normally this is done by DAGCombine, but if the select is introduced by
3933     // type legalization or op legalization, we miss it. Restricting to SETLT
3934     // case for now because that is what signed saturating add/sub need.
3935     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3936     // but we would probably want to swap the true/false values if the condition
3937     // is SETGE/SETLE to avoid an XORI.
3938     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3939         CCVal == ISD::SETLT) {
3940       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3941       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3942       if (TrueVal - 1 == FalseVal)
3943         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3944       if (TrueVal + 1 == FalseVal)
3945         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3946     }
3947 
3948     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3949 
3950     SDValue TargetCC = DAG.getCondCode(CCVal);
3951     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3952     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3953   }
3954 
3955   // Otherwise:
3956   // (select condv, truev, falsev)
3957   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3958   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3959   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3960 
3961   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3962 
3963   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3964 }
3965 
3966 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3967   SDValue CondV = Op.getOperand(1);
3968   SDLoc DL(Op);
3969   MVT XLenVT = Subtarget.getXLenVT();
3970 
3971   if (CondV.getOpcode() == ISD::SETCC &&
3972       CondV.getOperand(0).getValueType() == XLenVT) {
3973     SDValue LHS = CondV.getOperand(0);
3974     SDValue RHS = CondV.getOperand(1);
3975     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3976 
3977     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3978 
3979     SDValue TargetCC = DAG.getCondCode(CCVal);
3980     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3981                        LHS, RHS, TargetCC, Op.getOperand(2));
3982   }
3983 
3984   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3985                      CondV, DAG.getConstant(0, DL, XLenVT),
3986                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3987 }
3988 
3989 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3990   MachineFunction &MF = DAG.getMachineFunction();
3991   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3992 
3993   SDLoc DL(Op);
3994   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3995                                  getPointerTy(MF.getDataLayout()));
3996 
3997   // vastart just stores the address of the VarArgsFrameIndex slot into the
3998   // memory location argument.
3999   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4000   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
4001                       MachinePointerInfo(SV));
4002 }
4003 
4004 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
4005                                             SelectionDAG &DAG) const {
4006   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4007   MachineFunction &MF = DAG.getMachineFunction();
4008   MachineFrameInfo &MFI = MF.getFrameInfo();
4009   MFI.setFrameAddressIsTaken(true);
4010   Register FrameReg = RI.getFrameRegister(MF);
4011   int XLenInBytes = Subtarget.getXLen() / 8;
4012 
4013   EVT VT = Op.getValueType();
4014   SDLoc DL(Op);
4015   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
4016   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4017   while (Depth--) {
4018     int Offset = -(XLenInBytes * 2);
4019     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
4020                               DAG.getIntPtrConstant(Offset, DL));
4021     FrameAddr =
4022         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
4023   }
4024   return FrameAddr;
4025 }
4026 
4027 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
4028                                              SelectionDAG &DAG) const {
4029   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4030   MachineFunction &MF = DAG.getMachineFunction();
4031   MachineFrameInfo &MFI = MF.getFrameInfo();
4032   MFI.setReturnAddressIsTaken(true);
4033   MVT XLenVT = Subtarget.getXLenVT();
4034   int XLenInBytes = Subtarget.getXLen() / 8;
4035 
4036   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4037     return SDValue();
4038 
4039   EVT VT = Op.getValueType();
4040   SDLoc DL(Op);
4041   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4042   if (Depth) {
4043     int Off = -XLenInBytes;
4044     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4045     SDValue Offset = DAG.getConstant(Off, DL, VT);
4046     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4047                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4048                        MachinePointerInfo());
4049   }
4050 
4051   // Return the value of the return address register, marking it an implicit
4052   // live-in.
4053   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4054   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4055 }
4056 
4057 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4058                                                  SelectionDAG &DAG) const {
4059   SDLoc DL(Op);
4060   SDValue Lo = Op.getOperand(0);
4061   SDValue Hi = Op.getOperand(1);
4062   SDValue Shamt = Op.getOperand(2);
4063   EVT VT = Lo.getValueType();
4064 
4065   // if Shamt-XLEN < 0: // Shamt < XLEN
4066   //   Lo = Lo << Shamt
4067   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
4068   // else:
4069   //   Lo = 0
4070   //   Hi = Lo << (Shamt-XLEN)
4071 
4072   SDValue Zero = DAG.getConstant(0, DL, VT);
4073   SDValue One = DAG.getConstant(1, DL, VT);
4074   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4075   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4076   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4077   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4078 
4079   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4080   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4081   SDValue ShiftRightLo =
4082       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4083   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4084   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4085   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4086 
4087   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4088 
4089   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4090   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4091 
4092   SDValue Parts[2] = {Lo, Hi};
4093   return DAG.getMergeValues(Parts, DL);
4094 }
4095 
4096 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4097                                                   bool IsSRA) const {
4098   SDLoc DL(Op);
4099   SDValue Lo = Op.getOperand(0);
4100   SDValue Hi = Op.getOperand(1);
4101   SDValue Shamt = Op.getOperand(2);
4102   EVT VT = Lo.getValueType();
4103 
4104   // SRA expansion:
4105   //   if Shamt-XLEN < 0: // Shamt < XLEN
4106   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4107   //     Hi = Hi >>s Shamt
4108   //   else:
4109   //     Lo = Hi >>s (Shamt-XLEN);
4110   //     Hi = Hi >>s (XLEN-1)
4111   //
4112   // SRL expansion:
4113   //   if Shamt-XLEN < 0: // Shamt < XLEN
4114   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4115   //     Hi = Hi >>u Shamt
4116   //   else:
4117   //     Lo = Hi >>u (Shamt-XLEN);
4118   //     Hi = 0;
4119 
4120   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4121 
4122   SDValue Zero = DAG.getConstant(0, DL, VT);
4123   SDValue One = DAG.getConstant(1, DL, VT);
4124   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4125   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4126   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4127   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4128 
4129   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4130   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4131   SDValue ShiftLeftHi =
4132       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4133   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4134   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4135   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4136   SDValue HiFalse =
4137       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4138 
4139   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4140 
4141   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4142   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4143 
4144   SDValue Parts[2] = {Lo, Hi};
4145   return DAG.getMergeValues(Parts, DL);
4146 }
4147 
4148 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4149 // legal equivalently-sized i8 type, so we can use that as a go-between.
4150 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4151                                                   SelectionDAG &DAG) const {
4152   SDLoc DL(Op);
4153   MVT VT = Op.getSimpleValueType();
4154   SDValue SplatVal = Op.getOperand(0);
4155   // All-zeros or all-ones splats are handled specially.
4156   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4157     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4158     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4159   }
4160   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4161     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4162     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4163   }
4164   MVT XLenVT = Subtarget.getXLenVT();
4165   assert(SplatVal.getValueType() == XLenVT &&
4166          "Unexpected type for i1 splat value");
4167   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4168   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4169                          DAG.getConstant(1, DL, XLenVT));
4170   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4171   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4172   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4173 }
4174 
4175 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4176 // illegal (currently only vXi64 RV32).
4177 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4178 // them to VMV_V_X_VL.
4179 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4180                                                      SelectionDAG &DAG) const {
4181   SDLoc DL(Op);
4182   MVT VecVT = Op.getSimpleValueType();
4183   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4184          "Unexpected SPLAT_VECTOR_PARTS lowering");
4185 
4186   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4187   SDValue Lo = Op.getOperand(0);
4188   SDValue Hi = Op.getOperand(1);
4189 
4190   if (VecVT.isFixedLengthVector()) {
4191     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4192     SDLoc DL(Op);
4193     SDValue Mask, VL;
4194     std::tie(Mask, VL) =
4195         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4196 
4197     SDValue Res =
4198         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4199     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4200   }
4201 
4202   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4203     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4204     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4205     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4206     // node in order to try and match RVV vector/scalar instructions.
4207     if ((LoC >> 31) == HiC)
4208       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4209                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4210   }
4211 
4212   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4213   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4214       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4215       Hi.getConstantOperandVal(1) == 31)
4216     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4217                        DAG.getRegister(RISCV::X0, MVT::i32));
4218 
4219   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4220   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4221                      DAG.getUNDEF(VecVT), Lo, Hi,
4222                      DAG.getRegister(RISCV::X0, MVT::i32));
4223 }
4224 
4225 // Custom-lower extensions from mask vectors by using a vselect either with 1
4226 // for zero/any-extension or -1 for sign-extension:
4227 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4228 // Note that any-extension is lowered identically to zero-extension.
4229 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4230                                                 int64_t ExtTrueVal) const {
4231   SDLoc DL(Op);
4232   MVT VecVT = Op.getSimpleValueType();
4233   SDValue Src = Op.getOperand(0);
4234   // Only custom-lower extensions from mask types
4235   assert(Src.getValueType().isVector() &&
4236          Src.getValueType().getVectorElementType() == MVT::i1);
4237 
4238   MVT XLenVT = Subtarget.getXLenVT();
4239   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4240   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4241 
4242   if (VecVT.isScalableVector()) {
4243     // Be careful not to introduce illegal scalar types at this stage, and be
4244     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4245     // illegal and must be expanded. Since we know that the constants are
4246     // sign-extended 32-bit values, we use VMV_V_X_VL directly.
4247     bool IsRV32E64 =
4248         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4249 
4250     if (!IsRV32E64) {
4251       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4252       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4253     } else {
4254       SplatZero =
4255           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4256                       SplatZero, DAG.getRegister(RISCV::X0, XLenVT));
4257       SplatTrueVal =
4258           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4259                       SplatTrueVal, DAG.getRegister(RISCV::X0, XLenVT));
4260     }
4261 
4262     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4263   }
4264 
4265   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4266   MVT I1ContainerVT =
4267       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4268 
4269   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4270 
4271   SDValue Mask, VL;
4272   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4273 
4274   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4275                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4276   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4277                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4278   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4279                                SplatTrueVal, SplatZero, VL);
4280 
4281   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4282 }
4283 
4284 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4285     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4286   MVT ExtVT = Op.getSimpleValueType();
4287   // Only custom-lower extensions from fixed-length vector types.
4288   if (!ExtVT.isFixedLengthVector())
4289     return Op;
4290   MVT VT = Op.getOperand(0).getSimpleValueType();
4291   // Grab the canonical container type for the extended type. Infer the smaller
4292   // type from that to ensure the same number of vector elements, as we know
4293   // the LMUL will be sufficient to hold the smaller type.
4294   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4295   // Get the extended container type manually to ensure the same number of
4296   // vector elements between source and dest.
4297   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4298                                      ContainerExtVT.getVectorElementCount());
4299 
4300   SDValue Op1 =
4301       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4302 
4303   SDLoc DL(Op);
4304   SDValue Mask, VL;
4305   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4306 
4307   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4308 
4309   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4310 }
4311 
4312 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4313 // setcc operation:
4314 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4315 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4316                                                   SelectionDAG &DAG) const {
4317   SDLoc DL(Op);
4318   EVT MaskVT = Op.getValueType();
4319   // Only expect to custom-lower truncations to mask types
4320   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4321          "Unexpected type for vector mask lowering");
4322   SDValue Src = Op.getOperand(0);
4323   MVT VecVT = Src.getSimpleValueType();
4324 
4325   // If this is a fixed vector, we need to convert it to a scalable vector.
4326   MVT ContainerVT = VecVT;
4327   if (VecVT.isFixedLengthVector()) {
4328     ContainerVT = getContainerForFixedLengthVector(VecVT);
4329     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4330   }
4331 
4332   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4333   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4334 
4335   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4336                          DAG.getUNDEF(ContainerVT), SplatOne);
4337   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4338                           DAG.getUNDEF(ContainerVT), SplatZero);
4339 
4340   if (VecVT.isScalableVector()) {
4341     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4342     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4343   }
4344 
4345   SDValue Mask, VL;
4346   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4347 
4348   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4349   SDValue Trunc =
4350       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4351   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4352                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4353   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4354 }
4355 
4356 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4357 // first position of a vector, and that vector is slid up to the insert index.
4358 // By limiting the active vector length to index+1 and merging with the
4359 // original vector (with an undisturbed tail policy for elements >= VL), we
4360 // achieve the desired result of leaving all elements untouched except the one
4361 // at VL-1, which is replaced with the desired value.
4362 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4363                                                     SelectionDAG &DAG) const {
4364   SDLoc DL(Op);
4365   MVT VecVT = Op.getSimpleValueType();
4366   SDValue Vec = Op.getOperand(0);
4367   SDValue Val = Op.getOperand(1);
4368   SDValue Idx = Op.getOperand(2);
4369 
4370   if (VecVT.getVectorElementType() == MVT::i1) {
4371     // FIXME: For now we just promote to an i8 vector and insert into that,
4372     // but this is probably not optimal.
4373     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4374     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4375     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4376     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4377   }
4378 
4379   MVT ContainerVT = VecVT;
4380   // If the operand is a fixed-length vector, convert to a scalable one.
4381   if (VecVT.isFixedLengthVector()) {
4382     ContainerVT = getContainerForFixedLengthVector(VecVT);
4383     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4384   }
4385 
4386   MVT XLenVT = Subtarget.getXLenVT();
4387 
4388   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4389   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4390   // Even i64-element vectors on RV32 can be lowered without scalar
4391   // legalization if the most-significant 32 bits of the value are not affected
4392   // by the sign-extension of the lower 32 bits.
4393   // TODO: We could also catch sign extensions of a 32-bit value.
4394   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4395     const auto *CVal = cast<ConstantSDNode>(Val);
4396     if (isInt<32>(CVal->getSExtValue())) {
4397       IsLegalInsert = true;
4398       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4399     }
4400   }
4401 
4402   SDValue Mask, VL;
4403   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4404 
4405   SDValue ValInVec;
4406 
4407   if (IsLegalInsert) {
4408     unsigned Opc =
4409         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4410     if (isNullConstant(Idx)) {
4411       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4412       if (!VecVT.isFixedLengthVector())
4413         return Vec;
4414       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4415     }
4416     ValInVec =
4417         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4418   } else {
4419     // On RV32, i64-element vectors must be specially handled to place the
4420     // value at element 0, by using two vslide1up instructions in sequence on
4421     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4422     // this.
4423     SDValue One = DAG.getConstant(1, DL, XLenVT);
4424     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4425     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4426     MVT I32ContainerVT =
4427         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4428     SDValue I32Mask =
4429         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4430     // Limit the active VL to two.
4431     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4432     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4433     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4434     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4435                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4436     // First slide in the hi value, then the lo in underneath it.
4437     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4438                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4439                            I32Mask, InsertI64VL);
4440     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4441                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4442                            I32Mask, InsertI64VL);
4443     // Bitcast back to the right container type.
4444     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4445   }
4446 
4447   // Now that the value is in a vector, slide it into position.
4448   SDValue InsertVL =
4449       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4450   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4451                                 ValInVec, Idx, Mask, InsertVL);
4452   if (!VecVT.isFixedLengthVector())
4453     return Slideup;
4454   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4455 }
4456 
4457 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4458 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4459 // types this is done using VMV_X_S to allow us to glean information about the
4460 // sign bits of the result.
4461 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4462                                                      SelectionDAG &DAG) const {
4463   SDLoc DL(Op);
4464   SDValue Idx = Op.getOperand(1);
4465   SDValue Vec = Op.getOperand(0);
4466   EVT EltVT = Op.getValueType();
4467   MVT VecVT = Vec.getSimpleValueType();
4468   MVT XLenVT = Subtarget.getXLenVT();
4469 
4470   if (VecVT.getVectorElementType() == MVT::i1) {
4471     if (VecVT.isFixedLengthVector()) {
4472       unsigned NumElts = VecVT.getVectorNumElements();
4473       if (NumElts >= 8) {
4474         MVT WideEltVT;
4475         unsigned WidenVecLen;
4476         SDValue ExtractElementIdx;
4477         SDValue ExtractBitIdx;
4478         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4479         MVT LargestEltVT = MVT::getIntegerVT(
4480             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4481         if (NumElts <= LargestEltVT.getSizeInBits()) {
4482           assert(isPowerOf2_32(NumElts) &&
4483                  "the number of elements should be power of 2");
4484           WideEltVT = MVT::getIntegerVT(NumElts);
4485           WidenVecLen = 1;
4486           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4487           ExtractBitIdx = Idx;
4488         } else {
4489           WideEltVT = LargestEltVT;
4490           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4491           // extract element index = index / element width
4492           ExtractElementIdx = DAG.getNode(
4493               ISD::SRL, DL, XLenVT, Idx,
4494               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4495           // mask bit index = index % element width
4496           ExtractBitIdx = DAG.getNode(
4497               ISD::AND, DL, XLenVT, Idx,
4498               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4499         }
4500         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4501         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4502         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4503                                          Vec, ExtractElementIdx);
4504         // Extract the bit from GPR.
4505         SDValue ShiftRight =
4506             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4507         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4508                            DAG.getConstant(1, DL, XLenVT));
4509       }
4510     }
4511     // Otherwise, promote to an i8 vector and extract from that.
4512     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4513     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4514     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4515   }
4516 
4517   // If this is a fixed vector, we need to convert it to a scalable vector.
4518   MVT ContainerVT = VecVT;
4519   if (VecVT.isFixedLengthVector()) {
4520     ContainerVT = getContainerForFixedLengthVector(VecVT);
4521     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4522   }
4523 
4524   // If the index is 0, the vector is already in the right position.
4525   if (!isNullConstant(Idx)) {
4526     // Use a VL of 1 to avoid processing more elements than we need.
4527     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4528     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4529     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4530     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4531                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4532   }
4533 
4534   if (!EltVT.isInteger()) {
4535     // Floating-point extracts are handled in TableGen.
4536     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4537                        DAG.getConstant(0, DL, XLenVT));
4538   }
4539 
4540   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4541   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4542 }
4543 
4544 // Some RVV intrinsics may claim that they want an integer operand to be
4545 // promoted or expanded.
4546 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4547                                           const RISCVSubtarget &Subtarget) {
4548   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4549           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4550          "Unexpected opcode");
4551 
4552   if (!Subtarget.hasVInstructions())
4553     return SDValue();
4554 
4555   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4556   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4557   SDLoc DL(Op);
4558 
4559   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4560       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4561   if (!II || !II->hasSplatOperand())
4562     return SDValue();
4563 
4564   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4565   assert(SplatOp < Op.getNumOperands());
4566 
4567   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4568   SDValue &ScalarOp = Operands[SplatOp];
4569   MVT OpVT = ScalarOp.getSimpleValueType();
4570   MVT XLenVT = Subtarget.getXLenVT();
4571 
4572   // If this isn't a scalar, or its type is XLenVT we're done.
4573   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4574     return SDValue();
4575 
4576   // Simplest case is that the operand needs to be promoted to XLenVT.
4577   if (OpVT.bitsLT(XLenVT)) {
4578     // If the operand is a constant, sign extend to increase our chances
4579     // of being able to use a .vi instruction. ANY_EXTEND would become a
4580     // a zero extend and the simm5 check in isel would fail.
4581     // FIXME: Should we ignore the upper bits in isel instead?
4582     unsigned ExtOpc =
4583         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4584     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4585     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4586   }
4587 
4588   // Use the previous operand to get the vXi64 VT. The result might be a mask
4589   // VT for compares. Using the previous operand assumes that the previous
4590   // operand will never have a smaller element size than a scalar operand and
4591   // that a widening operation never uses SEW=64.
4592   // NOTE: If this fails the below assert, we can probably just find the
4593   // element count from any operand or result and use it to construct the VT.
4594   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4595   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4596 
4597   // The more complex case is when the scalar is larger than XLenVT.
4598   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4599          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4600 
4601   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4602   // on the instruction to sign-extend since SEW>XLEN.
4603   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4604     if (isInt<32>(CVal->getSExtValue())) {
4605       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4606       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4607     }
4608   }
4609 
4610   // We need to convert the scalar to a splat vector.
4611   // FIXME: Can we implicitly truncate the scalar if it is known to
4612   // be sign extended?
4613   SDValue VL = getVLOperand(Op);
4614   assert(VL.getValueType() == XLenVT);
4615   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4616   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4617 }
4618 
4619 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4620                                                      SelectionDAG &DAG) const {
4621   unsigned IntNo = Op.getConstantOperandVal(0);
4622   SDLoc DL(Op);
4623   MVT XLenVT = Subtarget.getXLenVT();
4624 
4625   switch (IntNo) {
4626   default:
4627     break; // Don't custom lower most intrinsics.
4628   case Intrinsic::thread_pointer: {
4629     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4630     return DAG.getRegister(RISCV::X4, PtrVT);
4631   }
4632   case Intrinsic::riscv_orc_b:
4633   case Intrinsic::riscv_brev8: {
4634     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4635     unsigned Opc =
4636         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4637     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4638                        DAG.getConstant(7, DL, XLenVT));
4639   }
4640   case Intrinsic::riscv_grev:
4641   case Intrinsic::riscv_gorc: {
4642     unsigned Opc =
4643         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4644     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4645   }
4646   case Intrinsic::riscv_zip:
4647   case Intrinsic::riscv_unzip: {
4648     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4649     // For i32 the immdiate is 15. For i64 the immediate is 31.
4650     unsigned Opc =
4651         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4652     unsigned BitWidth = Op.getValueSizeInBits();
4653     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4654     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4655                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4656   }
4657   case Intrinsic::riscv_shfl:
4658   case Intrinsic::riscv_unshfl: {
4659     unsigned Opc =
4660         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4661     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4662   }
4663   case Intrinsic::riscv_bcompress:
4664   case Intrinsic::riscv_bdecompress: {
4665     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4666                                                        : RISCVISD::BDECOMPRESS;
4667     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4668   }
4669   case Intrinsic::riscv_bfp:
4670     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4671                        Op.getOperand(2));
4672   case Intrinsic::riscv_fsl:
4673     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4674                        Op.getOperand(2), Op.getOperand(3));
4675   case Intrinsic::riscv_fsr:
4676     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4677                        Op.getOperand(2), Op.getOperand(3));
4678   case Intrinsic::riscv_vmv_x_s:
4679     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4680     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4681                        Op.getOperand(1));
4682   case Intrinsic::riscv_vmv_v_x:
4683     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4684                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4685                             Subtarget);
4686   case Intrinsic::riscv_vfmv_v_f:
4687     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4688                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4689   case Intrinsic::riscv_vmv_s_x: {
4690     SDValue Scalar = Op.getOperand(2);
4691 
4692     if (Scalar.getValueType().bitsLE(XLenVT)) {
4693       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4694       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4695                          Op.getOperand(1), Scalar, Op.getOperand(3));
4696     }
4697 
4698     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4699 
4700     // This is an i64 value that lives in two scalar registers. We have to
4701     // insert this in a convoluted way. First we build vXi64 splat containing
4702     // the/ two values that we assemble using some bit math. Next we'll use
4703     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4704     // to merge element 0 from our splat into the source vector.
4705     // FIXME: This is probably not the best way to do this, but it is
4706     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4707     // point.
4708     //   sw lo, (a0)
4709     //   sw hi, 4(a0)
4710     //   vlse vX, (a0)
4711     //
4712     //   vid.v      vVid
4713     //   vmseq.vx   mMask, vVid, 0
4714     //   vmerge.vvm vDest, vSrc, vVal, mMask
4715     MVT VT = Op.getSimpleValueType();
4716     SDValue Vec = Op.getOperand(1);
4717     SDValue VL = getVLOperand(Op);
4718 
4719     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4720     if (Op.getOperand(1).isUndef())
4721       return SplattedVal;
4722     SDValue SplattedIdx =
4723         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4724                     DAG.getConstant(0, DL, MVT::i32), VL);
4725 
4726     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4727     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4728     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4729     SDValue SelectCond =
4730         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4731                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4732     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4733                        Vec, VL);
4734   }
4735   case Intrinsic::riscv_vslide1up:
4736   case Intrinsic::riscv_vslide1down:
4737   case Intrinsic::riscv_vslide1up_mask:
4738   case Intrinsic::riscv_vslide1down_mask: {
4739     // We need to special case these when the scalar is larger than XLen.
4740     unsigned NumOps = Op.getNumOperands();
4741     bool IsMasked = NumOps == 7;
4742     SDValue Scalar = Op.getOperand(3);
4743     if (Scalar.getValueType().bitsLE(XLenVT))
4744       break;
4745 
4746     // Splatting a sign extended constant is fine.
4747     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4748       if (isInt<32>(CVal->getSExtValue()))
4749         break;
4750 
4751     MVT VT = Op.getSimpleValueType();
4752     assert(VT.getVectorElementType() == MVT::i64 &&
4753            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4754 
4755     // Convert the vector source to the equivalent nxvXi32 vector.
4756     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4757     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2));
4758 
4759     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4760                                    DAG.getConstant(0, DL, XLenVT));
4761     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4762                                    DAG.getConstant(1, DL, XLenVT));
4763 
4764     // Double the VL since we halved SEW.
4765     SDValue VL = getVLOperand(Op);
4766     SDValue I32VL =
4767         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4768 
4769     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4770     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4771 
4772     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4773     // instructions.
4774     SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1));
4775     if (!IsMasked) {
4776       if (IntNo == Intrinsic::riscv_vslide1up) {
4777         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4778                           ScalarHi, I32Mask, I32VL);
4779         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4780                           ScalarLo, I32Mask, I32VL);
4781       } else {
4782         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4783                           ScalarLo, I32Mask, I32VL);
4784         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4785                           ScalarHi, I32Mask, I32VL);
4786       }
4787     } else {
4788       // TODO Those VSLIDE1 could be TAMA because we use vmerge to select
4789       // maskedoff
4790       SDValue Undef = DAG.getUNDEF(I32VT);
4791       if (IntNo == Intrinsic::riscv_vslide1up_mask) {
4792         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
4793                           ScalarHi, I32Mask, I32VL);
4794         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
4795                           ScalarLo, I32Mask, I32VL);
4796       } else {
4797         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
4798                           ScalarLo, I32Mask, I32VL);
4799         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
4800                           ScalarHi, I32Mask, I32VL);
4801       }
4802     }
4803 
4804     // Convert back to nxvXi64.
4805     Vec = DAG.getBitcast(VT, Vec);
4806 
4807     if (!IsMasked)
4808       return Vec;
4809     // Apply mask after the operation.
4810     SDValue Mask = Op.getOperand(NumOps - 3);
4811     SDValue MaskedOff = Op.getOperand(1);
4812     // Assume Policy operand is the last operand.
4813     uint64_t Policy = Op.getConstantOperandVal(NumOps - 1);
4814     // We don't need to select maskedoff if it's undef.
4815     if (MaskedOff.isUndef())
4816       return Vec;
4817     // TAMU
4818     if (Policy == RISCVII::TAIL_AGNOSTIC)
4819       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4820                          VL);
4821     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4822     // It's fine because vmerge does not care mask policy.
4823     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4824   }
4825   }
4826 
4827   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4828 }
4829 
4830 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4831                                                     SelectionDAG &DAG) const {
4832   unsigned IntNo = Op.getConstantOperandVal(1);
4833   switch (IntNo) {
4834   default:
4835     break;
4836   case Intrinsic::riscv_masked_strided_load: {
4837     SDLoc DL(Op);
4838     MVT XLenVT = Subtarget.getXLenVT();
4839 
4840     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4841     // the selection of the masked intrinsics doesn't do this for us.
4842     SDValue Mask = Op.getOperand(5);
4843     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4844 
4845     MVT VT = Op->getSimpleValueType(0);
4846     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4847 
4848     SDValue PassThru = Op.getOperand(2);
4849     if (!IsUnmasked) {
4850       MVT MaskVT =
4851           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4852       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4853       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4854     }
4855 
4856     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4857 
4858     SDValue IntID = DAG.getTargetConstant(
4859         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4860         XLenVT);
4861 
4862     auto *Load = cast<MemIntrinsicSDNode>(Op);
4863     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4864     if (IsUnmasked)
4865       Ops.push_back(DAG.getUNDEF(ContainerVT));
4866     else
4867       Ops.push_back(PassThru);
4868     Ops.push_back(Op.getOperand(3)); // Ptr
4869     Ops.push_back(Op.getOperand(4)); // Stride
4870     if (!IsUnmasked)
4871       Ops.push_back(Mask);
4872     Ops.push_back(VL);
4873     if (!IsUnmasked) {
4874       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4875       Ops.push_back(Policy);
4876     }
4877 
4878     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4879     SDValue Result =
4880         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4881                                 Load->getMemoryVT(), Load->getMemOperand());
4882     SDValue Chain = Result.getValue(1);
4883     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4884     return DAG.getMergeValues({Result, Chain}, DL);
4885   }
4886   }
4887 
4888   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4889 }
4890 
4891 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4892                                                  SelectionDAG &DAG) const {
4893   unsigned IntNo = Op.getConstantOperandVal(1);
4894   switch (IntNo) {
4895   default:
4896     break;
4897   case Intrinsic::riscv_masked_strided_store: {
4898     SDLoc DL(Op);
4899     MVT XLenVT = Subtarget.getXLenVT();
4900 
4901     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4902     // the selection of the masked intrinsics doesn't do this for us.
4903     SDValue Mask = Op.getOperand(5);
4904     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4905 
4906     SDValue Val = Op.getOperand(2);
4907     MVT VT = Val.getSimpleValueType();
4908     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4909 
4910     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4911     if (!IsUnmasked) {
4912       MVT MaskVT =
4913           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4914       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4915     }
4916 
4917     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4918 
4919     SDValue IntID = DAG.getTargetConstant(
4920         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4921         XLenVT);
4922 
4923     auto *Store = cast<MemIntrinsicSDNode>(Op);
4924     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4925     Ops.push_back(Val);
4926     Ops.push_back(Op.getOperand(3)); // Ptr
4927     Ops.push_back(Op.getOperand(4)); // Stride
4928     if (!IsUnmasked)
4929       Ops.push_back(Mask);
4930     Ops.push_back(VL);
4931 
4932     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4933                                    Ops, Store->getMemoryVT(),
4934                                    Store->getMemOperand());
4935   }
4936   }
4937 
4938   return SDValue();
4939 }
4940 
4941 static MVT getLMUL1VT(MVT VT) {
4942   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4943          "Unexpected vector MVT");
4944   return MVT::getScalableVectorVT(
4945       VT.getVectorElementType(),
4946       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4947 }
4948 
4949 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4950   switch (ISDOpcode) {
4951   default:
4952     llvm_unreachable("Unhandled reduction");
4953   case ISD::VECREDUCE_ADD:
4954     return RISCVISD::VECREDUCE_ADD_VL;
4955   case ISD::VECREDUCE_UMAX:
4956     return RISCVISD::VECREDUCE_UMAX_VL;
4957   case ISD::VECREDUCE_SMAX:
4958     return RISCVISD::VECREDUCE_SMAX_VL;
4959   case ISD::VECREDUCE_UMIN:
4960     return RISCVISD::VECREDUCE_UMIN_VL;
4961   case ISD::VECREDUCE_SMIN:
4962     return RISCVISD::VECREDUCE_SMIN_VL;
4963   case ISD::VECREDUCE_AND:
4964     return RISCVISD::VECREDUCE_AND_VL;
4965   case ISD::VECREDUCE_OR:
4966     return RISCVISD::VECREDUCE_OR_VL;
4967   case ISD::VECREDUCE_XOR:
4968     return RISCVISD::VECREDUCE_XOR_VL;
4969   }
4970 }
4971 
4972 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4973                                                          SelectionDAG &DAG,
4974                                                          bool IsVP) const {
4975   SDLoc DL(Op);
4976   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4977   MVT VecVT = Vec.getSimpleValueType();
4978   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4979           Op.getOpcode() == ISD::VECREDUCE_OR ||
4980           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4981           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4982           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4983           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4984          "Unexpected reduction lowering");
4985 
4986   MVT XLenVT = Subtarget.getXLenVT();
4987   assert(Op.getValueType() == XLenVT &&
4988          "Expected reduction output to be legalized to XLenVT");
4989 
4990   MVT ContainerVT = VecVT;
4991   if (VecVT.isFixedLengthVector()) {
4992     ContainerVT = getContainerForFixedLengthVector(VecVT);
4993     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4994   }
4995 
4996   SDValue Mask, VL;
4997   if (IsVP) {
4998     Mask = Op.getOperand(2);
4999     VL = Op.getOperand(3);
5000   } else {
5001     std::tie(Mask, VL) =
5002         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5003   }
5004 
5005   unsigned BaseOpc;
5006   ISD::CondCode CC;
5007   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5008 
5009   switch (Op.getOpcode()) {
5010   default:
5011     llvm_unreachable("Unhandled reduction");
5012   case ISD::VECREDUCE_AND:
5013   case ISD::VP_REDUCE_AND: {
5014     // vcpop ~x == 0
5015     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5016     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5017     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5018     CC = ISD::SETEQ;
5019     BaseOpc = ISD::AND;
5020     break;
5021   }
5022   case ISD::VECREDUCE_OR:
5023   case ISD::VP_REDUCE_OR:
5024     // vcpop x != 0
5025     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5026     CC = ISD::SETNE;
5027     BaseOpc = ISD::OR;
5028     break;
5029   case ISD::VECREDUCE_XOR:
5030   case ISD::VP_REDUCE_XOR: {
5031     // ((vcpop x) & 1) != 0
5032     SDValue One = DAG.getConstant(1, DL, XLenVT);
5033     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5034     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5035     CC = ISD::SETNE;
5036     BaseOpc = ISD::XOR;
5037     break;
5038   }
5039   }
5040 
5041   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5042 
5043   if (!IsVP)
5044     return SetCC;
5045 
5046   // Now include the start value in the operation.
5047   // Note that we must return the start value when no elements are operated
5048   // upon. The vcpop instructions we've emitted in each case above will return
5049   // 0 for an inactive vector, and so we've already received the neutral value:
5050   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5051   // can simply include the start value.
5052   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5053 }
5054 
5055 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5056                                             SelectionDAG &DAG) const {
5057   SDLoc DL(Op);
5058   SDValue Vec = Op.getOperand(0);
5059   EVT VecEVT = Vec.getValueType();
5060 
5061   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5062 
5063   // Due to ordering in legalize types we may have a vector type that needs to
5064   // be split. Do that manually so we can get down to a legal type.
5065   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5066          TargetLowering::TypeSplitVector) {
5067     SDValue Lo, Hi;
5068     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5069     VecEVT = Lo.getValueType();
5070     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5071   }
5072 
5073   // TODO: The type may need to be widened rather than split. Or widened before
5074   // it can be split.
5075   if (!isTypeLegal(VecEVT))
5076     return SDValue();
5077 
5078   MVT VecVT = VecEVT.getSimpleVT();
5079   MVT VecEltVT = VecVT.getVectorElementType();
5080   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5081 
5082   MVT ContainerVT = VecVT;
5083   if (VecVT.isFixedLengthVector()) {
5084     ContainerVT = getContainerForFixedLengthVector(VecVT);
5085     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5086   }
5087 
5088   MVT M1VT = getLMUL1VT(ContainerVT);
5089   MVT XLenVT = Subtarget.getXLenVT();
5090 
5091   SDValue Mask, VL;
5092   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5093 
5094   SDValue NeutralElem =
5095       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5096   SDValue IdentitySplat =
5097       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5098                        M1VT, DL, DAG, Subtarget);
5099   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5100                                   IdentitySplat, Mask, VL);
5101   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5102                              DAG.getConstant(0, DL, XLenVT));
5103   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5104 }
5105 
5106 // Given a reduction op, this function returns the matching reduction opcode,
5107 // the vector SDValue and the scalar SDValue required to lower this to a
5108 // RISCVISD node.
5109 static std::tuple<unsigned, SDValue, SDValue>
5110 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5111   SDLoc DL(Op);
5112   auto Flags = Op->getFlags();
5113   unsigned Opcode = Op.getOpcode();
5114   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5115   switch (Opcode) {
5116   default:
5117     llvm_unreachable("Unhandled reduction");
5118   case ISD::VECREDUCE_FADD: {
5119     // Use positive zero if we can. It is cheaper to materialize.
5120     SDValue Zero =
5121         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5122     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5123   }
5124   case ISD::VECREDUCE_SEQ_FADD:
5125     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5126                            Op.getOperand(0));
5127   case ISD::VECREDUCE_FMIN:
5128     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5129                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5130   case ISD::VECREDUCE_FMAX:
5131     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5132                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5133   }
5134 }
5135 
5136 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5137                                               SelectionDAG &DAG) const {
5138   SDLoc DL(Op);
5139   MVT VecEltVT = Op.getSimpleValueType();
5140 
5141   unsigned RVVOpcode;
5142   SDValue VectorVal, ScalarVal;
5143   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5144       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5145   MVT VecVT = VectorVal.getSimpleValueType();
5146 
5147   MVT ContainerVT = VecVT;
5148   if (VecVT.isFixedLengthVector()) {
5149     ContainerVT = getContainerForFixedLengthVector(VecVT);
5150     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5151   }
5152 
5153   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5154   MVT XLenVT = Subtarget.getXLenVT();
5155 
5156   SDValue Mask, VL;
5157   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5158 
5159   SDValue ScalarSplat =
5160       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5161                        M1VT, DL, DAG, Subtarget);
5162   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5163                                   VectorVal, ScalarSplat, Mask, VL);
5164   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5165                      DAG.getConstant(0, DL, XLenVT));
5166 }
5167 
5168 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5169   switch (ISDOpcode) {
5170   default:
5171     llvm_unreachable("Unhandled reduction");
5172   case ISD::VP_REDUCE_ADD:
5173     return RISCVISD::VECREDUCE_ADD_VL;
5174   case ISD::VP_REDUCE_UMAX:
5175     return RISCVISD::VECREDUCE_UMAX_VL;
5176   case ISD::VP_REDUCE_SMAX:
5177     return RISCVISD::VECREDUCE_SMAX_VL;
5178   case ISD::VP_REDUCE_UMIN:
5179     return RISCVISD::VECREDUCE_UMIN_VL;
5180   case ISD::VP_REDUCE_SMIN:
5181     return RISCVISD::VECREDUCE_SMIN_VL;
5182   case ISD::VP_REDUCE_AND:
5183     return RISCVISD::VECREDUCE_AND_VL;
5184   case ISD::VP_REDUCE_OR:
5185     return RISCVISD::VECREDUCE_OR_VL;
5186   case ISD::VP_REDUCE_XOR:
5187     return RISCVISD::VECREDUCE_XOR_VL;
5188   case ISD::VP_REDUCE_FADD:
5189     return RISCVISD::VECREDUCE_FADD_VL;
5190   case ISD::VP_REDUCE_SEQ_FADD:
5191     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5192   case ISD::VP_REDUCE_FMAX:
5193     return RISCVISD::VECREDUCE_FMAX_VL;
5194   case ISD::VP_REDUCE_FMIN:
5195     return RISCVISD::VECREDUCE_FMIN_VL;
5196   }
5197 }
5198 
5199 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5200                                            SelectionDAG &DAG) const {
5201   SDLoc DL(Op);
5202   SDValue Vec = Op.getOperand(1);
5203   EVT VecEVT = Vec.getValueType();
5204 
5205   // TODO: The type may need to be widened rather than split. Or widened before
5206   // it can be split.
5207   if (!isTypeLegal(VecEVT))
5208     return SDValue();
5209 
5210   MVT VecVT = VecEVT.getSimpleVT();
5211   MVT VecEltVT = VecVT.getVectorElementType();
5212   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5213 
5214   MVT ContainerVT = VecVT;
5215   if (VecVT.isFixedLengthVector()) {
5216     ContainerVT = getContainerForFixedLengthVector(VecVT);
5217     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5218   }
5219 
5220   SDValue VL = Op.getOperand(3);
5221   SDValue Mask = Op.getOperand(2);
5222 
5223   MVT M1VT = getLMUL1VT(ContainerVT);
5224   MVT XLenVT = Subtarget.getXLenVT();
5225   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5226 
5227   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5228                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5229                                         DL, DAG, Subtarget);
5230   SDValue Reduction =
5231       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5232   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5233                              DAG.getConstant(0, DL, XLenVT));
5234   if (!VecVT.isInteger())
5235     return Elt0;
5236   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5237 }
5238 
5239 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5240                                                    SelectionDAG &DAG) const {
5241   SDValue Vec = Op.getOperand(0);
5242   SDValue SubVec = Op.getOperand(1);
5243   MVT VecVT = Vec.getSimpleValueType();
5244   MVT SubVecVT = SubVec.getSimpleValueType();
5245 
5246   SDLoc DL(Op);
5247   MVT XLenVT = Subtarget.getXLenVT();
5248   unsigned OrigIdx = Op.getConstantOperandVal(2);
5249   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5250 
5251   // We don't have the ability to slide mask vectors up indexed by their i1
5252   // elements; the smallest we can do is i8. Often we are able to bitcast to
5253   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5254   // into a scalable one, we might not necessarily have enough scalable
5255   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5256   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5257       (OrigIdx != 0 || !Vec.isUndef())) {
5258     if (VecVT.getVectorMinNumElements() >= 8 &&
5259         SubVecVT.getVectorMinNumElements() >= 8) {
5260       assert(OrigIdx % 8 == 0 && "Invalid index");
5261       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5262              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5263              "Unexpected mask vector lowering");
5264       OrigIdx /= 8;
5265       SubVecVT =
5266           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5267                            SubVecVT.isScalableVector());
5268       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5269                                VecVT.isScalableVector());
5270       Vec = DAG.getBitcast(VecVT, Vec);
5271       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5272     } else {
5273       // We can't slide this mask vector up indexed by its i1 elements.
5274       // This poses a problem when we wish to insert a scalable vector which
5275       // can't be re-expressed as a larger type. Just choose the slow path and
5276       // extend to a larger type, then truncate back down.
5277       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5278       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5279       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5280       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5281       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5282                         Op.getOperand(2));
5283       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5284       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5285     }
5286   }
5287 
5288   // If the subvector vector is a fixed-length type, we cannot use subregister
5289   // manipulation to simplify the codegen; we don't know which register of a
5290   // LMUL group contains the specific subvector as we only know the minimum
5291   // register size. Therefore we must slide the vector group up the full
5292   // amount.
5293   if (SubVecVT.isFixedLengthVector()) {
5294     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5295       return Op;
5296     MVT ContainerVT = VecVT;
5297     if (VecVT.isFixedLengthVector()) {
5298       ContainerVT = getContainerForFixedLengthVector(VecVT);
5299       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5300     }
5301     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5302                          DAG.getUNDEF(ContainerVT), SubVec,
5303                          DAG.getConstant(0, DL, XLenVT));
5304     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5305       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5306       return DAG.getBitcast(Op.getValueType(), SubVec);
5307     }
5308     SDValue Mask =
5309         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5310     // Set the vector length to only the number of elements we care about. Note
5311     // that for slideup this includes the offset.
5312     SDValue VL =
5313         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5314     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5315     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5316                                   SubVec, SlideupAmt, Mask, VL);
5317     if (VecVT.isFixedLengthVector())
5318       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5319     return DAG.getBitcast(Op.getValueType(), Slideup);
5320   }
5321 
5322   unsigned SubRegIdx, RemIdx;
5323   std::tie(SubRegIdx, RemIdx) =
5324       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5325           VecVT, SubVecVT, OrigIdx, TRI);
5326 
5327   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5328   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5329                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5330                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5331 
5332   // 1. If the Idx has been completely eliminated and this subvector's size is
5333   // a vector register or a multiple thereof, or the surrounding elements are
5334   // undef, then this is a subvector insert which naturally aligns to a vector
5335   // register. These can easily be handled using subregister manipulation.
5336   // 2. If the subvector is smaller than a vector register, then the insertion
5337   // must preserve the undisturbed elements of the register. We do this by
5338   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5339   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5340   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5341   // LMUL=1 type back into the larger vector (resolving to another subregister
5342   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5343   // to avoid allocating a large register group to hold our subvector.
5344   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5345     return Op;
5346 
5347   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5348   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5349   // (in our case undisturbed). This means we can set up a subvector insertion
5350   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5351   // size of the subvector.
5352   MVT InterSubVT = VecVT;
5353   SDValue AlignedExtract = Vec;
5354   unsigned AlignedIdx = OrigIdx - RemIdx;
5355   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5356     InterSubVT = getLMUL1VT(VecVT);
5357     // Extract a subvector equal to the nearest full vector register type. This
5358     // should resolve to a EXTRACT_SUBREG instruction.
5359     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5360                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5361   }
5362 
5363   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5364   // For scalable vectors this must be further multiplied by vscale.
5365   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5366 
5367   SDValue Mask, VL;
5368   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5369 
5370   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5371   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5372   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5373   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5374 
5375   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5376                        DAG.getUNDEF(InterSubVT), SubVec,
5377                        DAG.getConstant(0, DL, XLenVT));
5378 
5379   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5380                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5381 
5382   // If required, insert this subvector back into the correct vector register.
5383   // This should resolve to an INSERT_SUBREG instruction.
5384   if (VecVT.bitsGT(InterSubVT))
5385     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5386                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5387 
5388   // We might have bitcast from a mask type: cast back to the original type if
5389   // required.
5390   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5391 }
5392 
5393 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5394                                                     SelectionDAG &DAG) const {
5395   SDValue Vec = Op.getOperand(0);
5396   MVT SubVecVT = Op.getSimpleValueType();
5397   MVT VecVT = Vec.getSimpleValueType();
5398 
5399   SDLoc DL(Op);
5400   MVT XLenVT = Subtarget.getXLenVT();
5401   unsigned OrigIdx = Op.getConstantOperandVal(1);
5402   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5403 
5404   // We don't have the ability to slide mask vectors down indexed by their i1
5405   // elements; the smallest we can do is i8. Often we are able to bitcast to
5406   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5407   // from a scalable one, we might not necessarily have enough scalable
5408   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5409   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5410     if (VecVT.getVectorMinNumElements() >= 8 &&
5411         SubVecVT.getVectorMinNumElements() >= 8) {
5412       assert(OrigIdx % 8 == 0 && "Invalid index");
5413       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5414              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5415              "Unexpected mask vector lowering");
5416       OrigIdx /= 8;
5417       SubVecVT =
5418           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5419                            SubVecVT.isScalableVector());
5420       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5421                                VecVT.isScalableVector());
5422       Vec = DAG.getBitcast(VecVT, Vec);
5423     } else {
5424       // We can't slide this mask vector down, indexed by its i1 elements.
5425       // This poses a problem when we wish to extract a scalable vector which
5426       // can't be re-expressed as a larger type. Just choose the slow path and
5427       // extend to a larger type, then truncate back down.
5428       // TODO: We could probably improve this when extracting certain fixed
5429       // from fixed, where we can extract as i8 and shift the correct element
5430       // right to reach the desired subvector?
5431       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5432       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5433       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5434       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5435                         Op.getOperand(1));
5436       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5437       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5438     }
5439   }
5440 
5441   // If the subvector vector is a fixed-length type, we cannot use subregister
5442   // manipulation to simplify the codegen; we don't know which register of a
5443   // LMUL group contains the specific subvector as we only know the minimum
5444   // register size. Therefore we must slide the vector group down the full
5445   // amount.
5446   if (SubVecVT.isFixedLengthVector()) {
5447     // With an index of 0 this is a cast-like subvector, which can be performed
5448     // with subregister operations.
5449     if (OrigIdx == 0)
5450       return Op;
5451     MVT ContainerVT = VecVT;
5452     if (VecVT.isFixedLengthVector()) {
5453       ContainerVT = getContainerForFixedLengthVector(VecVT);
5454       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5455     }
5456     SDValue Mask =
5457         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5458     // Set the vector length to only the number of elements we care about. This
5459     // avoids sliding down elements we're going to discard straight away.
5460     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5461     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5462     SDValue Slidedown =
5463         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5464                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5465     // Now we can use a cast-like subvector extract to get the result.
5466     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5467                             DAG.getConstant(0, DL, XLenVT));
5468     return DAG.getBitcast(Op.getValueType(), Slidedown);
5469   }
5470 
5471   unsigned SubRegIdx, RemIdx;
5472   std::tie(SubRegIdx, RemIdx) =
5473       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5474           VecVT, SubVecVT, OrigIdx, TRI);
5475 
5476   // If the Idx has been completely eliminated then this is a subvector extract
5477   // which naturally aligns to a vector register. These can easily be handled
5478   // using subregister manipulation.
5479   if (RemIdx == 0)
5480     return Op;
5481 
5482   // Else we must shift our vector register directly to extract the subvector.
5483   // Do this using VSLIDEDOWN.
5484 
5485   // If the vector type is an LMUL-group type, extract a subvector equal to the
5486   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5487   // instruction.
5488   MVT InterSubVT = VecVT;
5489   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5490     InterSubVT = getLMUL1VT(VecVT);
5491     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5492                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5493   }
5494 
5495   // Slide this vector register down by the desired number of elements in order
5496   // to place the desired subvector starting at element 0.
5497   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5498   // For scalable vectors this must be further multiplied by vscale.
5499   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5500 
5501   SDValue Mask, VL;
5502   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5503   SDValue Slidedown =
5504       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5505                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5506 
5507   // Now the vector is in the right position, extract our final subvector. This
5508   // should resolve to a COPY.
5509   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5510                           DAG.getConstant(0, DL, XLenVT));
5511 
5512   // We might have bitcast from a mask type: cast back to the original type if
5513   // required.
5514   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5515 }
5516 
5517 // Lower step_vector to the vid instruction. Any non-identity step value must
5518 // be accounted for my manual expansion.
5519 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5520                                               SelectionDAG &DAG) const {
5521   SDLoc DL(Op);
5522   MVT VT = Op.getSimpleValueType();
5523   MVT XLenVT = Subtarget.getXLenVT();
5524   SDValue Mask, VL;
5525   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5526   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5527   uint64_t StepValImm = Op.getConstantOperandVal(0);
5528   if (StepValImm != 1) {
5529     if (isPowerOf2_64(StepValImm)) {
5530       SDValue StepVal =
5531           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5532                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5533       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5534     } else {
5535       SDValue StepVal = lowerScalarSplat(
5536           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5537           VL, VT, DL, DAG, Subtarget);
5538       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5539     }
5540   }
5541   return StepVec;
5542 }
5543 
5544 // Implement vector_reverse using vrgather.vv with indices determined by
5545 // subtracting the id of each element from (VLMAX-1). This will convert
5546 // the indices like so:
5547 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5548 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5549 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5550                                                  SelectionDAG &DAG) const {
5551   SDLoc DL(Op);
5552   MVT VecVT = Op.getSimpleValueType();
5553   unsigned EltSize = VecVT.getScalarSizeInBits();
5554   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5555 
5556   unsigned MaxVLMAX = 0;
5557   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5558   if (VectorBitsMax != 0)
5559     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5560 
5561   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5562   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5563 
5564   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5565   // to use vrgatherei16.vv.
5566   // TODO: It's also possible to use vrgatherei16.vv for other types to
5567   // decrease register width for the index calculation.
5568   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5569     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5570     // Reverse each half, then reassemble them in reverse order.
5571     // NOTE: It's also possible that after splitting that VLMAX no longer
5572     // requires vrgatherei16.vv.
5573     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5574       SDValue Lo, Hi;
5575       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5576       EVT LoVT, HiVT;
5577       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5578       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5579       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5580       // Reassemble the low and high pieces reversed.
5581       // FIXME: This is a CONCAT_VECTORS.
5582       SDValue Res =
5583           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5584                       DAG.getIntPtrConstant(0, DL));
5585       return DAG.getNode(
5586           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5587           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5588     }
5589 
5590     // Just promote the int type to i16 which will double the LMUL.
5591     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5592     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5593   }
5594 
5595   MVT XLenVT = Subtarget.getXLenVT();
5596   SDValue Mask, VL;
5597   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5598 
5599   // Calculate VLMAX-1 for the desired SEW.
5600   unsigned MinElts = VecVT.getVectorMinNumElements();
5601   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5602                               DAG.getConstant(MinElts, DL, XLenVT));
5603   SDValue VLMinus1 =
5604       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5605 
5606   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5607   bool IsRV32E64 =
5608       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5609   SDValue SplatVL;
5610   if (!IsRV32E64)
5611     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5612   else
5613     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5614                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5615 
5616   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5617   SDValue Indices =
5618       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5619 
5620   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5621 }
5622 
5623 SDValue
5624 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5625                                                      SelectionDAG &DAG) const {
5626   SDLoc DL(Op);
5627   auto *Load = cast<LoadSDNode>(Op);
5628 
5629   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5630                                         Load->getMemoryVT(),
5631                                         *Load->getMemOperand()) &&
5632          "Expecting a correctly-aligned load");
5633 
5634   MVT VT = Op.getSimpleValueType();
5635   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5636 
5637   SDValue VL =
5638       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5639 
5640   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5641   SDValue NewLoad = DAG.getMemIntrinsicNode(
5642       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5643       Load->getMemoryVT(), Load->getMemOperand());
5644 
5645   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5646   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5647 }
5648 
5649 SDValue
5650 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5651                                                       SelectionDAG &DAG) const {
5652   SDLoc DL(Op);
5653   auto *Store = cast<StoreSDNode>(Op);
5654 
5655   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5656                                         Store->getMemoryVT(),
5657                                         *Store->getMemOperand()) &&
5658          "Expecting a correctly-aligned store");
5659 
5660   SDValue StoreVal = Store->getValue();
5661   MVT VT = StoreVal.getSimpleValueType();
5662 
5663   // If the size less than a byte, we need to pad with zeros to make a byte.
5664   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5665     VT = MVT::v8i1;
5666     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5667                            DAG.getConstant(0, DL, VT), StoreVal,
5668                            DAG.getIntPtrConstant(0, DL));
5669   }
5670 
5671   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5672 
5673   SDValue VL =
5674       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5675 
5676   SDValue NewValue =
5677       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5678   return DAG.getMemIntrinsicNode(
5679       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5680       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5681       Store->getMemoryVT(), Store->getMemOperand());
5682 }
5683 
5684 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5685                                              SelectionDAG &DAG) const {
5686   SDLoc DL(Op);
5687   MVT VT = Op.getSimpleValueType();
5688 
5689   const auto *MemSD = cast<MemSDNode>(Op);
5690   EVT MemVT = MemSD->getMemoryVT();
5691   MachineMemOperand *MMO = MemSD->getMemOperand();
5692   SDValue Chain = MemSD->getChain();
5693   SDValue BasePtr = MemSD->getBasePtr();
5694 
5695   SDValue Mask, PassThru, VL;
5696   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5697     Mask = VPLoad->getMask();
5698     PassThru = DAG.getUNDEF(VT);
5699     VL = VPLoad->getVectorLength();
5700   } else {
5701     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5702     Mask = MLoad->getMask();
5703     PassThru = MLoad->getPassThru();
5704   }
5705 
5706   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5707 
5708   MVT XLenVT = Subtarget.getXLenVT();
5709 
5710   MVT ContainerVT = VT;
5711   if (VT.isFixedLengthVector()) {
5712     ContainerVT = getContainerForFixedLengthVector(VT);
5713     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5714     if (!IsUnmasked) {
5715       MVT MaskVT =
5716           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5717       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5718     }
5719   }
5720 
5721   if (!VL)
5722     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5723 
5724   unsigned IntID =
5725       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5726   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5727   if (IsUnmasked)
5728     Ops.push_back(DAG.getUNDEF(ContainerVT));
5729   else
5730     Ops.push_back(PassThru);
5731   Ops.push_back(BasePtr);
5732   if (!IsUnmasked)
5733     Ops.push_back(Mask);
5734   Ops.push_back(VL);
5735   if (!IsUnmasked)
5736     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5737 
5738   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5739 
5740   SDValue Result =
5741       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5742   Chain = Result.getValue(1);
5743 
5744   if (VT.isFixedLengthVector())
5745     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5746 
5747   return DAG.getMergeValues({Result, Chain}, DL);
5748 }
5749 
5750 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5751                                               SelectionDAG &DAG) const {
5752   SDLoc DL(Op);
5753 
5754   const auto *MemSD = cast<MemSDNode>(Op);
5755   EVT MemVT = MemSD->getMemoryVT();
5756   MachineMemOperand *MMO = MemSD->getMemOperand();
5757   SDValue Chain = MemSD->getChain();
5758   SDValue BasePtr = MemSD->getBasePtr();
5759   SDValue Val, Mask, VL;
5760 
5761   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5762     Val = VPStore->getValue();
5763     Mask = VPStore->getMask();
5764     VL = VPStore->getVectorLength();
5765   } else {
5766     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5767     Val = MStore->getValue();
5768     Mask = MStore->getMask();
5769   }
5770 
5771   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5772 
5773   MVT VT = Val.getSimpleValueType();
5774   MVT XLenVT = Subtarget.getXLenVT();
5775 
5776   MVT ContainerVT = VT;
5777   if (VT.isFixedLengthVector()) {
5778     ContainerVT = getContainerForFixedLengthVector(VT);
5779 
5780     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5781     if (!IsUnmasked) {
5782       MVT MaskVT =
5783           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5784       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5785     }
5786   }
5787 
5788   if (!VL)
5789     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5790 
5791   unsigned IntID =
5792       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5793   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5794   Ops.push_back(Val);
5795   Ops.push_back(BasePtr);
5796   if (!IsUnmasked)
5797     Ops.push_back(Mask);
5798   Ops.push_back(VL);
5799 
5800   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5801                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5802 }
5803 
5804 SDValue
5805 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5806                                                       SelectionDAG &DAG) const {
5807   MVT InVT = Op.getOperand(0).getSimpleValueType();
5808   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5809 
5810   MVT VT = Op.getSimpleValueType();
5811 
5812   SDValue Op1 =
5813       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5814   SDValue Op2 =
5815       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5816 
5817   SDLoc DL(Op);
5818   SDValue VL =
5819       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5820 
5821   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5822   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5823 
5824   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5825                             Op.getOperand(2), Mask, VL);
5826 
5827   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5828 }
5829 
5830 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5831     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5832   MVT VT = Op.getSimpleValueType();
5833 
5834   if (VT.getVectorElementType() == MVT::i1)
5835     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5836 
5837   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5838 }
5839 
5840 SDValue
5841 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5842                                                       SelectionDAG &DAG) const {
5843   unsigned Opc;
5844   switch (Op.getOpcode()) {
5845   default: llvm_unreachable("Unexpected opcode!");
5846   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5847   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5848   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5849   }
5850 
5851   return lowerToScalableOp(Op, DAG, Opc);
5852 }
5853 
5854 // Lower vector ABS to smax(X, sub(0, X)).
5855 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5856   SDLoc DL(Op);
5857   MVT VT = Op.getSimpleValueType();
5858   SDValue X = Op.getOperand(0);
5859 
5860   assert(VT.isFixedLengthVector() && "Unexpected type");
5861 
5862   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5863   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5864 
5865   SDValue Mask, VL;
5866   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5867 
5868   SDValue SplatZero = DAG.getNode(
5869       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5870       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5871   SDValue NegX =
5872       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5873   SDValue Max =
5874       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5875 
5876   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5877 }
5878 
5879 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5880     SDValue Op, SelectionDAG &DAG) const {
5881   SDLoc DL(Op);
5882   MVT VT = Op.getSimpleValueType();
5883   SDValue Mag = Op.getOperand(0);
5884   SDValue Sign = Op.getOperand(1);
5885   assert(Mag.getValueType() == Sign.getValueType() &&
5886          "Can only handle COPYSIGN with matching types.");
5887 
5888   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5889   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5890   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5891 
5892   SDValue Mask, VL;
5893   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5894 
5895   SDValue CopySign =
5896       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5897 
5898   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5899 }
5900 
5901 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5902     SDValue Op, SelectionDAG &DAG) const {
5903   MVT VT = Op.getSimpleValueType();
5904   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5905 
5906   MVT I1ContainerVT =
5907       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5908 
5909   SDValue CC =
5910       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5911   SDValue Op1 =
5912       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5913   SDValue Op2 =
5914       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5915 
5916   SDLoc DL(Op);
5917   SDValue Mask, VL;
5918   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5919 
5920   SDValue Select =
5921       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5922 
5923   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5924 }
5925 
5926 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5927                                                unsigned NewOpc,
5928                                                bool HasMask) const {
5929   MVT VT = Op.getSimpleValueType();
5930   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5931 
5932   // Create list of operands by converting existing ones to scalable types.
5933   SmallVector<SDValue, 6> Ops;
5934   for (const SDValue &V : Op->op_values()) {
5935     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5936 
5937     // Pass through non-vector operands.
5938     if (!V.getValueType().isVector()) {
5939       Ops.push_back(V);
5940       continue;
5941     }
5942 
5943     // "cast" fixed length vector to a scalable vector.
5944     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5945            "Only fixed length vectors are supported!");
5946     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5947   }
5948 
5949   SDLoc DL(Op);
5950   SDValue Mask, VL;
5951   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5952   if (HasMask)
5953     Ops.push_back(Mask);
5954   Ops.push_back(VL);
5955 
5956   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5957   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5958 }
5959 
5960 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5961 // * Operands of each node are assumed to be in the same order.
5962 // * The EVL operand is promoted from i32 to i64 on RV64.
5963 // * Fixed-length vectors are converted to their scalable-vector container
5964 //   types.
5965 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5966                                        unsigned RISCVISDOpc) const {
5967   SDLoc DL(Op);
5968   MVT VT = Op.getSimpleValueType();
5969   SmallVector<SDValue, 4> Ops;
5970 
5971   for (const auto &OpIdx : enumerate(Op->ops())) {
5972     SDValue V = OpIdx.value();
5973     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5974     // Pass through operands which aren't fixed-length vectors.
5975     if (!V.getValueType().isFixedLengthVector()) {
5976       Ops.push_back(V);
5977       continue;
5978     }
5979     // "cast" fixed length vector to a scalable vector.
5980     MVT OpVT = V.getSimpleValueType();
5981     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5982     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5983            "Only fixed length vectors are supported!");
5984     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5985   }
5986 
5987   if (!VT.isFixedLengthVector())
5988     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5989 
5990   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5991 
5992   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5993 
5994   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5995 }
5996 
5997 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5998                                             unsigned MaskOpc,
5999                                             unsigned VecOpc) const {
6000   MVT VT = Op.getSimpleValueType();
6001   if (VT.getVectorElementType() != MVT::i1)
6002     return lowerVPOp(Op, DAG, VecOpc);
6003 
6004   // It is safe to drop mask parameter as masked-off elements are undef.
6005   SDValue Op1 = Op->getOperand(0);
6006   SDValue Op2 = Op->getOperand(1);
6007   SDValue VL = Op->getOperand(3);
6008 
6009   MVT ContainerVT = VT;
6010   const bool IsFixed = VT.isFixedLengthVector();
6011   if (IsFixed) {
6012     ContainerVT = getContainerForFixedLengthVector(VT);
6013     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6014     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6015   }
6016 
6017   SDLoc DL(Op);
6018   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6019   if (!IsFixed)
6020     return Val;
6021   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6022 }
6023 
6024 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6025 // matched to a RVV indexed load. The RVV indexed load instructions only
6026 // support the "unsigned unscaled" addressing mode; indices are implicitly
6027 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6028 // signed or scaled indexing is extended to the XLEN value type and scaled
6029 // accordingly.
6030 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6031                                                SelectionDAG &DAG) const {
6032   SDLoc DL(Op);
6033   MVT VT = Op.getSimpleValueType();
6034 
6035   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6036   EVT MemVT = MemSD->getMemoryVT();
6037   MachineMemOperand *MMO = MemSD->getMemOperand();
6038   SDValue Chain = MemSD->getChain();
6039   SDValue BasePtr = MemSD->getBasePtr();
6040 
6041   ISD::LoadExtType LoadExtType;
6042   SDValue Index, Mask, PassThru, VL;
6043 
6044   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6045     Index = VPGN->getIndex();
6046     Mask = VPGN->getMask();
6047     PassThru = DAG.getUNDEF(VT);
6048     VL = VPGN->getVectorLength();
6049     // VP doesn't support extending loads.
6050     LoadExtType = ISD::NON_EXTLOAD;
6051   } else {
6052     // Else it must be a MGATHER.
6053     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6054     Index = MGN->getIndex();
6055     Mask = MGN->getMask();
6056     PassThru = MGN->getPassThru();
6057     LoadExtType = MGN->getExtensionType();
6058   }
6059 
6060   MVT IndexVT = Index.getSimpleValueType();
6061   MVT XLenVT = Subtarget.getXLenVT();
6062 
6063   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6064          "Unexpected VTs!");
6065   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6066   // Targets have to explicitly opt-in for extending vector loads.
6067   assert(LoadExtType == ISD::NON_EXTLOAD &&
6068          "Unexpected extending MGATHER/VP_GATHER");
6069   (void)LoadExtType;
6070 
6071   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6072   // the selection of the masked intrinsics doesn't do this for us.
6073   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6074 
6075   MVT ContainerVT = VT;
6076   if (VT.isFixedLengthVector()) {
6077     // We need to use the larger of the result and index type to determine the
6078     // scalable type to use so we don't increase LMUL for any operand/result.
6079     if (VT.bitsGE(IndexVT)) {
6080       ContainerVT = getContainerForFixedLengthVector(VT);
6081       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6082                                  ContainerVT.getVectorElementCount());
6083     } else {
6084       IndexVT = getContainerForFixedLengthVector(IndexVT);
6085       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6086                                      IndexVT.getVectorElementCount());
6087     }
6088 
6089     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6090 
6091     if (!IsUnmasked) {
6092       MVT MaskVT =
6093           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6094       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6095       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6096     }
6097   }
6098 
6099   if (!VL)
6100     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6101 
6102   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6103     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6104     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6105                                    VL);
6106     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6107                         TrueMask, VL);
6108   }
6109 
6110   unsigned IntID =
6111       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6112   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6113   if (IsUnmasked)
6114     Ops.push_back(DAG.getUNDEF(ContainerVT));
6115   else
6116     Ops.push_back(PassThru);
6117   Ops.push_back(BasePtr);
6118   Ops.push_back(Index);
6119   if (!IsUnmasked)
6120     Ops.push_back(Mask);
6121   Ops.push_back(VL);
6122   if (!IsUnmasked)
6123     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6124 
6125   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6126   SDValue Result =
6127       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6128   Chain = Result.getValue(1);
6129 
6130   if (VT.isFixedLengthVector())
6131     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6132 
6133   return DAG.getMergeValues({Result, Chain}, DL);
6134 }
6135 
6136 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6137 // matched to a RVV indexed store. The RVV indexed store instructions only
6138 // support the "unsigned unscaled" addressing mode; indices are implicitly
6139 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6140 // signed or scaled indexing is extended to the XLEN value type and scaled
6141 // accordingly.
6142 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6143                                                 SelectionDAG &DAG) const {
6144   SDLoc DL(Op);
6145   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6146   EVT MemVT = MemSD->getMemoryVT();
6147   MachineMemOperand *MMO = MemSD->getMemOperand();
6148   SDValue Chain = MemSD->getChain();
6149   SDValue BasePtr = MemSD->getBasePtr();
6150 
6151   bool IsTruncatingStore = false;
6152   SDValue Index, Mask, Val, VL;
6153 
6154   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6155     Index = VPSN->getIndex();
6156     Mask = VPSN->getMask();
6157     Val = VPSN->getValue();
6158     VL = VPSN->getVectorLength();
6159     // VP doesn't support truncating stores.
6160     IsTruncatingStore = false;
6161   } else {
6162     // Else it must be a MSCATTER.
6163     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6164     Index = MSN->getIndex();
6165     Mask = MSN->getMask();
6166     Val = MSN->getValue();
6167     IsTruncatingStore = MSN->isTruncatingStore();
6168   }
6169 
6170   MVT VT = Val.getSimpleValueType();
6171   MVT IndexVT = Index.getSimpleValueType();
6172   MVT XLenVT = Subtarget.getXLenVT();
6173 
6174   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6175          "Unexpected VTs!");
6176   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6177   // Targets have to explicitly opt-in for extending vector loads and
6178   // truncating vector stores.
6179   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6180   (void)IsTruncatingStore;
6181 
6182   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6183   // the selection of the masked intrinsics doesn't do this for us.
6184   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6185 
6186   MVT ContainerVT = VT;
6187   if (VT.isFixedLengthVector()) {
6188     // We need to use the larger of the value and index type to determine the
6189     // scalable type to use so we don't increase LMUL for any operand/result.
6190     if (VT.bitsGE(IndexVT)) {
6191       ContainerVT = getContainerForFixedLengthVector(VT);
6192       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6193                                  ContainerVT.getVectorElementCount());
6194     } else {
6195       IndexVT = getContainerForFixedLengthVector(IndexVT);
6196       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6197                                      IndexVT.getVectorElementCount());
6198     }
6199 
6200     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6201     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6202 
6203     if (!IsUnmasked) {
6204       MVT MaskVT =
6205           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6206       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6207     }
6208   }
6209 
6210   if (!VL)
6211     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6212 
6213   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6214     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6215     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6216                                    VL);
6217     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6218                         TrueMask, VL);
6219   }
6220 
6221   unsigned IntID =
6222       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6223   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6224   Ops.push_back(Val);
6225   Ops.push_back(BasePtr);
6226   Ops.push_back(Index);
6227   if (!IsUnmasked)
6228     Ops.push_back(Mask);
6229   Ops.push_back(VL);
6230 
6231   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6232                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6233 }
6234 
6235 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6236                                                SelectionDAG &DAG) const {
6237   const MVT XLenVT = Subtarget.getXLenVT();
6238   SDLoc DL(Op);
6239   SDValue Chain = Op->getOperand(0);
6240   SDValue SysRegNo = DAG.getTargetConstant(
6241       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6242   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6243   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6244 
6245   // Encoding used for rounding mode in RISCV differs from that used in
6246   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6247   // table, which consists of a sequence of 4-bit fields, each representing
6248   // corresponding FLT_ROUNDS mode.
6249   static const int Table =
6250       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6251       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6252       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6253       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6254       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6255 
6256   SDValue Shift =
6257       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6258   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6259                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6260   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6261                                DAG.getConstant(7, DL, XLenVT));
6262 
6263   return DAG.getMergeValues({Masked, Chain}, DL);
6264 }
6265 
6266 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6267                                                SelectionDAG &DAG) const {
6268   const MVT XLenVT = Subtarget.getXLenVT();
6269   SDLoc DL(Op);
6270   SDValue Chain = Op->getOperand(0);
6271   SDValue RMValue = Op->getOperand(1);
6272   SDValue SysRegNo = DAG.getTargetConstant(
6273       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6274 
6275   // Encoding used for rounding mode in RISCV differs from that used in
6276   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6277   // a table, which consists of a sequence of 4-bit fields, each representing
6278   // corresponding RISCV mode.
6279   static const unsigned Table =
6280       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6281       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6282       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6283       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6284       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6285 
6286   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6287                               DAG.getConstant(2, DL, XLenVT));
6288   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6289                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6290   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6291                         DAG.getConstant(0x7, DL, XLenVT));
6292   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6293                      RMValue);
6294 }
6295 
6296 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6297   switch (IntNo) {
6298   default:
6299     llvm_unreachable("Unexpected Intrinsic");
6300   case Intrinsic::riscv_grev:
6301     return RISCVISD::GREVW;
6302   case Intrinsic::riscv_gorc:
6303     return RISCVISD::GORCW;
6304   case Intrinsic::riscv_bcompress:
6305     return RISCVISD::BCOMPRESSW;
6306   case Intrinsic::riscv_bdecompress:
6307     return RISCVISD::BDECOMPRESSW;
6308   case Intrinsic::riscv_bfp:
6309     return RISCVISD::BFPW;
6310   case Intrinsic::riscv_fsl:
6311     return RISCVISD::FSLW;
6312   case Intrinsic::riscv_fsr:
6313     return RISCVISD::FSRW;
6314   }
6315 }
6316 
6317 // Converts the given intrinsic to a i64 operation with any extension.
6318 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6319                                          unsigned IntNo) {
6320   SDLoc DL(N);
6321   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6322   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6323   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6324   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6325   // ReplaceNodeResults requires we maintain the same type for the return value.
6326   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6327 }
6328 
6329 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6330 // form of the given Opcode.
6331 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6332   switch (Opcode) {
6333   default:
6334     llvm_unreachable("Unexpected opcode");
6335   case ISD::SHL:
6336     return RISCVISD::SLLW;
6337   case ISD::SRA:
6338     return RISCVISD::SRAW;
6339   case ISD::SRL:
6340     return RISCVISD::SRLW;
6341   case ISD::SDIV:
6342     return RISCVISD::DIVW;
6343   case ISD::UDIV:
6344     return RISCVISD::DIVUW;
6345   case ISD::UREM:
6346     return RISCVISD::REMUW;
6347   case ISD::ROTL:
6348     return RISCVISD::ROLW;
6349   case ISD::ROTR:
6350     return RISCVISD::RORW;
6351   case RISCVISD::GREV:
6352     return RISCVISD::GREVW;
6353   case RISCVISD::GORC:
6354     return RISCVISD::GORCW;
6355   }
6356 }
6357 
6358 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6359 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6360 // otherwise be promoted to i64, making it difficult to select the
6361 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6362 // type i8/i16/i32 is lost.
6363 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6364                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6365   SDLoc DL(N);
6366   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6367   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6368   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6369   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6370   // ReplaceNodeResults requires we maintain the same type for the return value.
6371   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6372 }
6373 
6374 // Converts the given 32-bit operation to a i64 operation with signed extension
6375 // semantic to reduce the signed extension instructions.
6376 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6377   SDLoc DL(N);
6378   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6379   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6380   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6381   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6382                                DAG.getValueType(MVT::i32));
6383   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6384 }
6385 
6386 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6387                                              SmallVectorImpl<SDValue> &Results,
6388                                              SelectionDAG &DAG) const {
6389   SDLoc DL(N);
6390   switch (N->getOpcode()) {
6391   default:
6392     llvm_unreachable("Don't know how to custom type legalize this operation!");
6393   case ISD::STRICT_FP_TO_SINT:
6394   case ISD::STRICT_FP_TO_UINT:
6395   case ISD::FP_TO_SINT:
6396   case ISD::FP_TO_UINT: {
6397     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6398            "Unexpected custom legalisation");
6399     bool IsStrict = N->isStrictFPOpcode();
6400     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6401                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6402     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6403     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6404         TargetLowering::TypeSoftenFloat) {
6405       if (!isTypeLegal(Op0.getValueType()))
6406         return;
6407       if (IsStrict) {
6408         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6409                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6410         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6411         SDValue Res = DAG.getNode(
6412             Opc, DL, VTs, N->getOperand(0), Op0,
6413             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6414         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6415         Results.push_back(Res.getValue(1));
6416         return;
6417       }
6418       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6419       SDValue Res =
6420           DAG.getNode(Opc, DL, MVT::i64, Op0,
6421                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6422       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6423       return;
6424     }
6425     // If the FP type needs to be softened, emit a library call using the 'si'
6426     // version. If we left it to default legalization we'd end up with 'di'. If
6427     // the FP type doesn't need to be softened just let generic type
6428     // legalization promote the result type.
6429     RTLIB::Libcall LC;
6430     if (IsSigned)
6431       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6432     else
6433       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6434     MakeLibCallOptions CallOptions;
6435     EVT OpVT = Op0.getValueType();
6436     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6437     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6438     SDValue Result;
6439     std::tie(Result, Chain) =
6440         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6441     Results.push_back(Result);
6442     if (IsStrict)
6443       Results.push_back(Chain);
6444     break;
6445   }
6446   case ISD::READCYCLECOUNTER: {
6447     assert(!Subtarget.is64Bit() &&
6448            "READCYCLECOUNTER only has custom type legalization on riscv32");
6449 
6450     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6451     SDValue RCW =
6452         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6453 
6454     Results.push_back(
6455         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6456     Results.push_back(RCW.getValue(2));
6457     break;
6458   }
6459   case ISD::MUL: {
6460     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6461     unsigned XLen = Subtarget.getXLen();
6462     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6463     if (Size > XLen) {
6464       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6465       SDValue LHS = N->getOperand(0);
6466       SDValue RHS = N->getOperand(1);
6467       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6468 
6469       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6470       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6471       // We need exactly one side to be unsigned.
6472       if (LHSIsU == RHSIsU)
6473         return;
6474 
6475       auto MakeMULPair = [&](SDValue S, SDValue U) {
6476         MVT XLenVT = Subtarget.getXLenVT();
6477         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6478         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6479         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6480         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6481         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6482       };
6483 
6484       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6485       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6486 
6487       // The other operand should be signed, but still prefer MULH when
6488       // possible.
6489       if (RHSIsU && LHSIsS && !RHSIsS)
6490         Results.push_back(MakeMULPair(LHS, RHS));
6491       else if (LHSIsU && RHSIsS && !LHSIsS)
6492         Results.push_back(MakeMULPair(RHS, LHS));
6493 
6494       return;
6495     }
6496     LLVM_FALLTHROUGH;
6497   }
6498   case ISD::ADD:
6499   case ISD::SUB:
6500     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6501            "Unexpected custom legalisation");
6502     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6503     break;
6504   case ISD::SHL:
6505   case ISD::SRA:
6506   case ISD::SRL:
6507     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6508            "Unexpected custom legalisation");
6509     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6510       Results.push_back(customLegalizeToWOp(N, DAG));
6511       break;
6512     }
6513 
6514     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6515     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6516     // shift amount.
6517     if (N->getOpcode() == ISD::SHL) {
6518       SDLoc DL(N);
6519       SDValue NewOp0 =
6520           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6521       SDValue NewOp1 =
6522           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6523       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6524       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6525                                    DAG.getValueType(MVT::i32));
6526       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6527     }
6528 
6529     break;
6530   case ISD::ROTL:
6531   case ISD::ROTR:
6532     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6533            "Unexpected custom legalisation");
6534     Results.push_back(customLegalizeToWOp(N, DAG));
6535     break;
6536   case ISD::CTTZ:
6537   case ISD::CTTZ_ZERO_UNDEF:
6538   case ISD::CTLZ:
6539   case ISD::CTLZ_ZERO_UNDEF: {
6540     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6541            "Unexpected custom legalisation");
6542 
6543     SDValue NewOp0 =
6544         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6545     bool IsCTZ =
6546         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6547     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6548     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6549     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6550     return;
6551   }
6552   case ISD::SDIV:
6553   case ISD::UDIV:
6554   case ISD::UREM: {
6555     MVT VT = N->getSimpleValueType(0);
6556     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6557            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6558            "Unexpected custom legalisation");
6559     // Don't promote division/remainder by constant since we should expand those
6560     // to multiply by magic constant.
6561     // FIXME: What if the expansion is disabled for minsize.
6562     if (N->getOperand(1).getOpcode() == ISD::Constant)
6563       return;
6564 
6565     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6566     // the upper 32 bits. For other types we need to sign or zero extend
6567     // based on the opcode.
6568     unsigned ExtOpc = ISD::ANY_EXTEND;
6569     if (VT != MVT::i32)
6570       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6571                                            : ISD::ZERO_EXTEND;
6572 
6573     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6574     break;
6575   }
6576   case ISD::UADDO:
6577   case ISD::USUBO: {
6578     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6579            "Unexpected custom legalisation");
6580     bool IsAdd = N->getOpcode() == ISD::UADDO;
6581     // Create an ADDW or SUBW.
6582     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6583     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6584     SDValue Res =
6585         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6586     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6587                       DAG.getValueType(MVT::i32));
6588 
6589     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6590     // Since the inputs are sign extended from i32, this is equivalent to
6591     // comparing the lower 32 bits.
6592     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6593     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6594                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6595 
6596     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6597     Results.push_back(Overflow);
6598     return;
6599   }
6600   case ISD::UADDSAT:
6601   case ISD::USUBSAT: {
6602     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6603            "Unexpected custom legalisation");
6604     if (Subtarget.hasStdExtZbb()) {
6605       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6606       // sign extend allows overflow of the lower 32 bits to be detected on
6607       // the promoted size.
6608       SDValue LHS =
6609           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6610       SDValue RHS =
6611           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6612       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6613       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6614       return;
6615     }
6616 
6617     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6618     // promotion for UADDO/USUBO.
6619     Results.push_back(expandAddSubSat(N, DAG));
6620     return;
6621   }
6622   case ISD::BITCAST: {
6623     EVT VT = N->getValueType(0);
6624     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6625     SDValue Op0 = N->getOperand(0);
6626     EVT Op0VT = Op0.getValueType();
6627     MVT XLenVT = Subtarget.getXLenVT();
6628     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6629       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6630       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6631     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6632                Subtarget.hasStdExtF()) {
6633       SDValue FPConv =
6634           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6635       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6636     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6637                isTypeLegal(Op0VT)) {
6638       // Custom-legalize bitcasts from fixed-length vector types to illegal
6639       // scalar types in order to improve codegen. Bitcast the vector to a
6640       // one-element vector type whose element type is the same as the result
6641       // type, and extract the first element.
6642       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6643       if (isTypeLegal(BVT)) {
6644         SDValue BVec = DAG.getBitcast(BVT, Op0);
6645         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6646                                       DAG.getConstant(0, DL, XLenVT)));
6647       }
6648     }
6649     break;
6650   }
6651   case RISCVISD::GREV:
6652   case RISCVISD::GORC: {
6653     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6654            "Unexpected custom legalisation");
6655     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6656     // This is similar to customLegalizeToWOp, except that we pass the second
6657     // operand (a TargetConstant) straight through: it is already of type
6658     // XLenVT.
6659     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6660     SDValue NewOp0 =
6661         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6662     SDValue NewOp1 =
6663         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6664     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6665     // ReplaceNodeResults requires we maintain the same type for the return
6666     // value.
6667     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6668     break;
6669   }
6670   case RISCVISD::SHFL: {
6671     // There is no SHFLIW instruction, but we can just promote the operation.
6672     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6673            "Unexpected custom legalisation");
6674     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6675     SDValue NewOp0 =
6676         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6677     SDValue NewOp1 =
6678         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6679     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6680     // ReplaceNodeResults requires we maintain the same type for the return
6681     // value.
6682     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6683     break;
6684   }
6685   case ISD::BSWAP:
6686   case ISD::BITREVERSE: {
6687     MVT VT = N->getSimpleValueType(0);
6688     MVT XLenVT = Subtarget.getXLenVT();
6689     assert((VT == MVT::i8 || VT == MVT::i16 ||
6690             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6691            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6692     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6693     unsigned Imm = VT.getSizeInBits() - 1;
6694     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6695     if (N->getOpcode() == ISD::BSWAP)
6696       Imm &= ~0x7U;
6697     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6698     SDValue GREVI =
6699         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6700     // ReplaceNodeResults requires we maintain the same type for the return
6701     // value.
6702     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6703     break;
6704   }
6705   case ISD::FSHL:
6706   case ISD::FSHR: {
6707     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6708            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6709     SDValue NewOp0 =
6710         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6711     SDValue NewOp1 =
6712         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6713     SDValue NewShAmt =
6714         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6715     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6716     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6717     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6718                            DAG.getConstant(0x1f, DL, MVT::i64));
6719     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6720     // instruction use different orders. fshl will return its first operand for
6721     // shift of zero, fshr will return its second operand. fsl and fsr both
6722     // return rs1 so the ISD nodes need to have different operand orders.
6723     // Shift amount is in rs2.
6724     unsigned Opc = RISCVISD::FSLW;
6725     if (N->getOpcode() == ISD::FSHR) {
6726       std::swap(NewOp0, NewOp1);
6727       Opc = RISCVISD::FSRW;
6728     }
6729     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6730     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6731     break;
6732   }
6733   case ISD::EXTRACT_VECTOR_ELT: {
6734     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6735     // type is illegal (currently only vXi64 RV32).
6736     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6737     // transferred to the destination register. We issue two of these from the
6738     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6739     // first element.
6740     SDValue Vec = N->getOperand(0);
6741     SDValue Idx = N->getOperand(1);
6742 
6743     // The vector type hasn't been legalized yet so we can't issue target
6744     // specific nodes if it needs legalization.
6745     // FIXME: We would manually legalize if it's important.
6746     if (!isTypeLegal(Vec.getValueType()))
6747       return;
6748 
6749     MVT VecVT = Vec.getSimpleValueType();
6750 
6751     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6752            VecVT.getVectorElementType() == MVT::i64 &&
6753            "Unexpected EXTRACT_VECTOR_ELT legalization");
6754 
6755     // If this is a fixed vector, we need to convert it to a scalable vector.
6756     MVT ContainerVT = VecVT;
6757     if (VecVT.isFixedLengthVector()) {
6758       ContainerVT = getContainerForFixedLengthVector(VecVT);
6759       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6760     }
6761 
6762     MVT XLenVT = Subtarget.getXLenVT();
6763 
6764     // Use a VL of 1 to avoid processing more elements than we need.
6765     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6766     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6767     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6768 
6769     // Unless the index is known to be 0, we must slide the vector down to get
6770     // the desired element into index 0.
6771     if (!isNullConstant(Idx)) {
6772       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6773                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6774     }
6775 
6776     // Extract the lower XLEN bits of the correct vector element.
6777     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6778 
6779     // To extract the upper XLEN bits of the vector element, shift the first
6780     // element right by 32 bits and re-extract the lower XLEN bits.
6781     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6782                                      DAG.getUNDEF(ContainerVT),
6783                                      DAG.getConstant(32, DL, XLenVT), VL);
6784     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6785                                  ThirtyTwoV, Mask, VL);
6786 
6787     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6788 
6789     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6790     break;
6791   }
6792   case ISD::INTRINSIC_WO_CHAIN: {
6793     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6794     switch (IntNo) {
6795     default:
6796       llvm_unreachable(
6797           "Don't know how to custom type legalize this intrinsic!");
6798     case Intrinsic::riscv_grev:
6799     case Intrinsic::riscv_gorc:
6800     case Intrinsic::riscv_bcompress:
6801     case Intrinsic::riscv_bdecompress:
6802     case Intrinsic::riscv_bfp: {
6803       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6804              "Unexpected custom legalisation");
6805       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6806       break;
6807     }
6808     case Intrinsic::riscv_fsl:
6809     case Intrinsic::riscv_fsr: {
6810       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6811              "Unexpected custom legalisation");
6812       SDValue NewOp1 =
6813           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6814       SDValue NewOp2 =
6815           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6816       SDValue NewOp3 =
6817           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6818       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6819       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6820       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6821       break;
6822     }
6823     case Intrinsic::riscv_orc_b: {
6824       // Lower to the GORCI encoding for orc.b with the operand extended.
6825       SDValue NewOp =
6826           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6827       // If Zbp is enabled, use GORCIW which will sign extend the result.
6828       unsigned Opc =
6829           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6830       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6831                                 DAG.getConstant(7, DL, MVT::i64));
6832       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6833       return;
6834     }
6835     case Intrinsic::riscv_shfl:
6836     case Intrinsic::riscv_unshfl: {
6837       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6838              "Unexpected custom legalisation");
6839       SDValue NewOp1 =
6840           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6841       SDValue NewOp2 =
6842           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6843       unsigned Opc =
6844           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6845       // There is no (UN)SHFLIW. If the control word is a constant, we can use
6846       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
6847       // will be shuffled the same way as the lower 32 bit half, but the two
6848       // halves won't cross.
6849       if (isa<ConstantSDNode>(NewOp2)) {
6850         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6851                              DAG.getConstant(0xf, DL, MVT::i64));
6852         Opc =
6853             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6854       }
6855       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6856       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6857       break;
6858     }
6859     case Intrinsic::riscv_vmv_x_s: {
6860       EVT VT = N->getValueType(0);
6861       MVT XLenVT = Subtarget.getXLenVT();
6862       if (VT.bitsLT(XLenVT)) {
6863         // Simple case just extract using vmv.x.s and truncate.
6864         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6865                                       Subtarget.getXLenVT(), N->getOperand(1));
6866         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6867         return;
6868       }
6869 
6870       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6871              "Unexpected custom legalization");
6872 
6873       // We need to do the move in two steps.
6874       SDValue Vec = N->getOperand(1);
6875       MVT VecVT = Vec.getSimpleValueType();
6876 
6877       // First extract the lower XLEN bits of the element.
6878       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6879 
6880       // To extract the upper XLEN bits of the vector element, shift the first
6881       // element right by 32 bits and re-extract the lower XLEN bits.
6882       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6883       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6884       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6885       SDValue ThirtyTwoV =
6886           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
6887                       DAG.getConstant(32, DL, XLenVT), VL);
6888       SDValue LShr32 =
6889           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6890       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6891 
6892       Results.push_back(
6893           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6894       break;
6895     }
6896     }
6897     break;
6898   }
6899   case ISD::VECREDUCE_ADD:
6900   case ISD::VECREDUCE_AND:
6901   case ISD::VECREDUCE_OR:
6902   case ISD::VECREDUCE_XOR:
6903   case ISD::VECREDUCE_SMAX:
6904   case ISD::VECREDUCE_UMAX:
6905   case ISD::VECREDUCE_SMIN:
6906   case ISD::VECREDUCE_UMIN:
6907     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6908       Results.push_back(V);
6909     break;
6910   case ISD::VP_REDUCE_ADD:
6911   case ISD::VP_REDUCE_AND:
6912   case ISD::VP_REDUCE_OR:
6913   case ISD::VP_REDUCE_XOR:
6914   case ISD::VP_REDUCE_SMAX:
6915   case ISD::VP_REDUCE_UMAX:
6916   case ISD::VP_REDUCE_SMIN:
6917   case ISD::VP_REDUCE_UMIN:
6918     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6919       Results.push_back(V);
6920     break;
6921   case ISD::FLT_ROUNDS_: {
6922     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6923     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6924     Results.push_back(Res.getValue(0));
6925     Results.push_back(Res.getValue(1));
6926     break;
6927   }
6928   }
6929 }
6930 
6931 // A structure to hold one of the bit-manipulation patterns below. Together, a
6932 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6933 //   (or (and (shl x, 1), 0xAAAAAAAA),
6934 //       (and (srl x, 1), 0x55555555))
6935 struct RISCVBitmanipPat {
6936   SDValue Op;
6937   unsigned ShAmt;
6938   bool IsSHL;
6939 
6940   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6941     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6942   }
6943 };
6944 
6945 // Matches patterns of the form
6946 //   (and (shl x, C2), (C1 << C2))
6947 //   (and (srl x, C2), C1)
6948 //   (shl (and x, C1), C2)
6949 //   (srl (and x, (C1 << C2)), C2)
6950 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6951 // The expected masks for each shift amount are specified in BitmanipMasks where
6952 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6953 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6954 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6955 // XLen is 64.
6956 static Optional<RISCVBitmanipPat>
6957 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6958   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6959          "Unexpected number of masks");
6960   Optional<uint64_t> Mask;
6961   // Optionally consume a mask around the shift operation.
6962   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6963     Mask = Op.getConstantOperandVal(1);
6964     Op = Op.getOperand(0);
6965   }
6966   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6967     return None;
6968   bool IsSHL = Op.getOpcode() == ISD::SHL;
6969 
6970   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6971     return None;
6972   uint64_t ShAmt = Op.getConstantOperandVal(1);
6973 
6974   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6975   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6976     return None;
6977   // If we don't have enough masks for 64 bit, then we must be trying to
6978   // match SHFL so we're only allowed to shift 1/4 of the width.
6979   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6980     return None;
6981 
6982   SDValue Src = Op.getOperand(0);
6983 
6984   // The expected mask is shifted left when the AND is found around SHL
6985   // patterns.
6986   //   ((x >> 1) & 0x55555555)
6987   //   ((x << 1) & 0xAAAAAAAA)
6988   bool SHLExpMask = IsSHL;
6989 
6990   if (!Mask) {
6991     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6992     // the mask is all ones: consume that now.
6993     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6994       Mask = Src.getConstantOperandVal(1);
6995       Src = Src.getOperand(0);
6996       // The expected mask is now in fact shifted left for SRL, so reverse the
6997       // decision.
6998       //   ((x & 0xAAAAAAAA) >> 1)
6999       //   ((x & 0x55555555) << 1)
7000       SHLExpMask = !SHLExpMask;
7001     } else {
7002       // Use a default shifted mask of all-ones if there's no AND, truncated
7003       // down to the expected width. This simplifies the logic later on.
7004       Mask = maskTrailingOnes<uint64_t>(Width);
7005       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7006     }
7007   }
7008 
7009   unsigned MaskIdx = Log2_32(ShAmt);
7010   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7011 
7012   if (SHLExpMask)
7013     ExpMask <<= ShAmt;
7014 
7015   if (Mask != ExpMask)
7016     return None;
7017 
7018   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7019 }
7020 
7021 // Matches any of the following bit-manipulation patterns:
7022 //   (and (shl x, 1), (0x55555555 << 1))
7023 //   (and (srl x, 1), 0x55555555)
7024 //   (shl (and x, 0x55555555), 1)
7025 //   (srl (and x, (0x55555555 << 1)), 1)
7026 // where the shift amount and mask may vary thus:
7027 //   [1]  = 0x55555555 / 0xAAAAAAAA
7028 //   [2]  = 0x33333333 / 0xCCCCCCCC
7029 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7030 //   [8]  = 0x00FF00FF / 0xFF00FF00
7031 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7032 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7033 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7034   // These are the unshifted masks which we use to match bit-manipulation
7035   // patterns. They may be shifted left in certain circumstances.
7036   static const uint64_t BitmanipMasks[] = {
7037       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7038       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7039 
7040   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7041 }
7042 
7043 // Match the following pattern as a GREVI(W) operation
7044 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7045 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7046                                const RISCVSubtarget &Subtarget) {
7047   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7048   EVT VT = Op.getValueType();
7049 
7050   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7051     auto LHS = matchGREVIPat(Op.getOperand(0));
7052     auto RHS = matchGREVIPat(Op.getOperand(1));
7053     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7054       SDLoc DL(Op);
7055       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7056                          DAG.getConstant(LHS->ShAmt, DL, VT));
7057     }
7058   }
7059   return SDValue();
7060 }
7061 
7062 // Matches any the following pattern as a GORCI(W) operation
7063 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7064 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7065 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7066 // Note that with the variant of 3.,
7067 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7068 // the inner pattern will first be matched as GREVI and then the outer
7069 // pattern will be matched to GORC via the first rule above.
7070 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7071 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7072                                const RISCVSubtarget &Subtarget) {
7073   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7074   EVT VT = Op.getValueType();
7075 
7076   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7077     SDLoc DL(Op);
7078     SDValue Op0 = Op.getOperand(0);
7079     SDValue Op1 = Op.getOperand(1);
7080 
7081     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7082       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7083           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7084           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7085         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7086       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7087       if ((Reverse.getOpcode() == ISD::ROTL ||
7088            Reverse.getOpcode() == ISD::ROTR) &&
7089           Reverse.getOperand(0) == X &&
7090           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7091         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7092         if (RotAmt == (VT.getSizeInBits() / 2))
7093           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7094                              DAG.getConstant(RotAmt, DL, VT));
7095       }
7096       return SDValue();
7097     };
7098 
7099     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7100     if (SDValue V = MatchOROfReverse(Op0, Op1))
7101       return V;
7102     if (SDValue V = MatchOROfReverse(Op1, Op0))
7103       return V;
7104 
7105     // OR is commutable so canonicalize its OR operand to the left
7106     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7107       std::swap(Op0, Op1);
7108     if (Op0.getOpcode() != ISD::OR)
7109       return SDValue();
7110     SDValue OrOp0 = Op0.getOperand(0);
7111     SDValue OrOp1 = Op0.getOperand(1);
7112     auto LHS = matchGREVIPat(OrOp0);
7113     // OR is commutable so swap the operands and try again: x might have been
7114     // on the left
7115     if (!LHS) {
7116       std::swap(OrOp0, OrOp1);
7117       LHS = matchGREVIPat(OrOp0);
7118     }
7119     auto RHS = matchGREVIPat(Op1);
7120     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7121       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7122                          DAG.getConstant(LHS->ShAmt, DL, VT));
7123     }
7124   }
7125   return SDValue();
7126 }
7127 
7128 // Matches any of the following bit-manipulation patterns:
7129 //   (and (shl x, 1), (0x22222222 << 1))
7130 //   (and (srl x, 1), 0x22222222)
7131 //   (shl (and x, 0x22222222), 1)
7132 //   (srl (and x, (0x22222222 << 1)), 1)
7133 // where the shift amount and mask may vary thus:
7134 //   [1]  = 0x22222222 / 0x44444444
7135 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7136 //   [4]  = 0x00F000F0 / 0x0F000F00
7137 //   [8]  = 0x0000FF00 / 0x00FF0000
7138 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7139 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7140   // These are the unshifted masks which we use to match bit-manipulation
7141   // patterns. They may be shifted left in certain circumstances.
7142   static const uint64_t BitmanipMasks[] = {
7143       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7144       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7145 
7146   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7147 }
7148 
7149 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7150 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7151                                const RISCVSubtarget &Subtarget) {
7152   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7153   EVT VT = Op.getValueType();
7154 
7155   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7156     return SDValue();
7157 
7158   SDValue Op0 = Op.getOperand(0);
7159   SDValue Op1 = Op.getOperand(1);
7160 
7161   // Or is commutable so canonicalize the second OR to the LHS.
7162   if (Op0.getOpcode() != ISD::OR)
7163     std::swap(Op0, Op1);
7164   if (Op0.getOpcode() != ISD::OR)
7165     return SDValue();
7166 
7167   // We found an inner OR, so our operands are the operands of the inner OR
7168   // and the other operand of the outer OR.
7169   SDValue A = Op0.getOperand(0);
7170   SDValue B = Op0.getOperand(1);
7171   SDValue C = Op1;
7172 
7173   auto Match1 = matchSHFLPat(A);
7174   auto Match2 = matchSHFLPat(B);
7175 
7176   // If neither matched, we failed.
7177   if (!Match1 && !Match2)
7178     return SDValue();
7179 
7180   // We had at least one match. if one failed, try the remaining C operand.
7181   if (!Match1) {
7182     std::swap(A, C);
7183     Match1 = matchSHFLPat(A);
7184     if (!Match1)
7185       return SDValue();
7186   } else if (!Match2) {
7187     std::swap(B, C);
7188     Match2 = matchSHFLPat(B);
7189     if (!Match2)
7190       return SDValue();
7191   }
7192   assert(Match1 && Match2);
7193 
7194   // Make sure our matches pair up.
7195   if (!Match1->formsPairWith(*Match2))
7196     return SDValue();
7197 
7198   // All the remains is to make sure C is an AND with the same input, that masks
7199   // out the bits that are being shuffled.
7200   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7201       C.getOperand(0) != Match1->Op)
7202     return SDValue();
7203 
7204   uint64_t Mask = C.getConstantOperandVal(1);
7205 
7206   static const uint64_t BitmanipMasks[] = {
7207       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7208       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7209   };
7210 
7211   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7212   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7213   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7214 
7215   if (Mask != ExpMask)
7216     return SDValue();
7217 
7218   SDLoc DL(Op);
7219   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7220                      DAG.getConstant(Match1->ShAmt, DL, VT));
7221 }
7222 
7223 // Optimize (add (shl x, c0), (shl y, c1)) ->
7224 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7225 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7226                                   const RISCVSubtarget &Subtarget) {
7227   // Perform this optimization only in the zba extension.
7228   if (!Subtarget.hasStdExtZba())
7229     return SDValue();
7230 
7231   // Skip for vector types and larger types.
7232   EVT VT = N->getValueType(0);
7233   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7234     return SDValue();
7235 
7236   // The two operand nodes must be SHL and have no other use.
7237   SDValue N0 = N->getOperand(0);
7238   SDValue N1 = N->getOperand(1);
7239   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7240       !N0->hasOneUse() || !N1->hasOneUse())
7241     return SDValue();
7242 
7243   // Check c0 and c1.
7244   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7245   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7246   if (!N0C || !N1C)
7247     return SDValue();
7248   int64_t C0 = N0C->getSExtValue();
7249   int64_t C1 = N1C->getSExtValue();
7250   if (C0 <= 0 || C1 <= 0)
7251     return SDValue();
7252 
7253   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7254   int64_t Bits = std::min(C0, C1);
7255   int64_t Diff = std::abs(C0 - C1);
7256   if (Diff != 1 && Diff != 2 && Diff != 3)
7257     return SDValue();
7258 
7259   // Build nodes.
7260   SDLoc DL(N);
7261   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7262   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7263   SDValue NA0 =
7264       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7265   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7266   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7267 }
7268 
7269 // Combine
7270 // ROTR ((GREV x, 24), 16) -> (GREVI x, 8)
7271 // ROTL ((GREV x, 24), 16) -> (GREVI x, 8)
7272 // RORW ((GREVW x, 24), 16) -> (GREVIW x, 8)
7273 // ROLW ((GREVW x, 24), 16) -> (GREVIW x, 8)
7274 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG) {
7275   SDValue Src = N->getOperand(0);
7276   SDLoc DL(N);
7277   unsigned Opc;
7278 
7279   if ((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL) &&
7280       Src.getOpcode() == RISCVISD::GREV)
7281     Opc = RISCVISD::GREV;
7282   else if ((N->getOpcode() == RISCVISD::RORW ||
7283             N->getOpcode() == RISCVISD::ROLW) &&
7284            Src.getOpcode() == RISCVISD::GREVW)
7285     Opc = RISCVISD::GREVW;
7286   else
7287     return SDValue();
7288 
7289   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7290       !isa<ConstantSDNode>(Src.getOperand(1)))
7291     return SDValue();
7292 
7293   unsigned ShAmt1 = N->getConstantOperandVal(1);
7294   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7295   if (ShAmt1 != 16 && ShAmt2 != 24)
7296     return SDValue();
7297 
7298   Src = Src.getOperand(0);
7299   return DAG.getNode(Opc, DL, N->getValueType(0), Src,
7300                      DAG.getConstant(8, DL, N->getOperand(1).getValueType()));
7301 }
7302 
7303 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7304 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7305 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7306 // not undo itself, but they are redundant.
7307 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7308   SDValue Src = N->getOperand(0);
7309 
7310   if (Src.getOpcode() != N->getOpcode())
7311     return SDValue();
7312 
7313   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7314       !isa<ConstantSDNode>(Src.getOperand(1)))
7315     return SDValue();
7316 
7317   unsigned ShAmt1 = N->getConstantOperandVal(1);
7318   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7319   Src = Src.getOperand(0);
7320 
7321   unsigned CombinedShAmt;
7322   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7323     CombinedShAmt = ShAmt1 | ShAmt2;
7324   else
7325     CombinedShAmt = ShAmt1 ^ ShAmt2;
7326 
7327   if (CombinedShAmt == 0)
7328     return Src;
7329 
7330   SDLoc DL(N);
7331   return DAG.getNode(
7332       N->getOpcode(), DL, N->getValueType(0), Src,
7333       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7334 }
7335 
7336 // Combine a constant select operand into its use:
7337 //
7338 // (and (select cond, -1, c), x)
7339 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7340 // (or  (select cond, 0, c), x)
7341 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7342 // (xor (select cond, 0, c), x)
7343 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7344 // (add (select cond, 0, c), x)
7345 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7346 // (sub x, (select cond, 0, c))
7347 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7348 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7349                                    SelectionDAG &DAG, bool AllOnes) {
7350   EVT VT = N->getValueType(0);
7351 
7352   // Skip vectors.
7353   if (VT.isVector())
7354     return SDValue();
7355 
7356   if ((Slct.getOpcode() != ISD::SELECT &&
7357        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7358       !Slct.hasOneUse())
7359     return SDValue();
7360 
7361   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7362     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7363   };
7364 
7365   bool SwapSelectOps;
7366   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7367   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7368   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7369   SDValue NonConstantVal;
7370   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7371     SwapSelectOps = false;
7372     NonConstantVal = FalseVal;
7373   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7374     SwapSelectOps = true;
7375     NonConstantVal = TrueVal;
7376   } else
7377     return SDValue();
7378 
7379   // Slct is now know to be the desired identity constant when CC is true.
7380   TrueVal = OtherOp;
7381   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7382   // Unless SwapSelectOps says the condition should be false.
7383   if (SwapSelectOps)
7384     std::swap(TrueVal, FalseVal);
7385 
7386   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7387     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7388                        {Slct.getOperand(0), Slct.getOperand(1),
7389                         Slct.getOperand(2), TrueVal, FalseVal});
7390 
7391   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7392                      {Slct.getOperand(0), TrueVal, FalseVal});
7393 }
7394 
7395 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7396 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7397                                               bool AllOnes) {
7398   SDValue N0 = N->getOperand(0);
7399   SDValue N1 = N->getOperand(1);
7400   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7401     return Result;
7402   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7403     return Result;
7404   return SDValue();
7405 }
7406 
7407 // Transform (add (mul x, c0), c1) ->
7408 //           (add (mul (add x, c1/c0), c0), c1%c0).
7409 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7410 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7411 // to an infinite loop in DAGCombine if transformed.
7412 // Or transform (add (mul x, c0), c1) ->
7413 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7414 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7415 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7416 // lead to an infinite loop in DAGCombine if transformed.
7417 // Or transform (add (mul x, c0), c1) ->
7418 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7419 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7420 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7421 // lead to an infinite loop in DAGCombine if transformed.
7422 // Or transform (add (mul x, c0), c1) ->
7423 //              (mul (add x, c1/c0), c0).
7424 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7425 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7426                                      const RISCVSubtarget &Subtarget) {
7427   // Skip for vector types and larger types.
7428   EVT VT = N->getValueType(0);
7429   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7430     return SDValue();
7431   // The first operand node must be a MUL and has no other use.
7432   SDValue N0 = N->getOperand(0);
7433   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7434     return SDValue();
7435   // Check if c0 and c1 match above conditions.
7436   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7437   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7438   if (!N0C || !N1C)
7439     return SDValue();
7440   // If N0C has multiple uses it's possible one of the cases in
7441   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7442   // in an infinite loop.
7443   if (!N0C->hasOneUse())
7444     return SDValue();
7445   int64_t C0 = N0C->getSExtValue();
7446   int64_t C1 = N1C->getSExtValue();
7447   int64_t CA, CB;
7448   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7449     return SDValue();
7450   // Search for proper CA (non-zero) and CB that both are simm12.
7451   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7452       !isInt<12>(C0 * (C1 / C0))) {
7453     CA = C1 / C0;
7454     CB = C1 % C0;
7455   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7456              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7457     CA = C1 / C0 + 1;
7458     CB = C1 % C0 - C0;
7459   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7460              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7461     CA = C1 / C0 - 1;
7462     CB = C1 % C0 + C0;
7463   } else
7464     return SDValue();
7465   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7466   SDLoc DL(N);
7467   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7468                              DAG.getConstant(CA, DL, VT));
7469   SDValue New1 =
7470       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7471   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7472 }
7473 
7474 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7475                                  const RISCVSubtarget &Subtarget) {
7476   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7477     return V;
7478   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7479     return V;
7480   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7481   //      (select lhs, rhs, cc, x, (add x, y))
7482   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7483 }
7484 
7485 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7486   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7487   //      (select lhs, rhs, cc, x, (sub x, y))
7488   SDValue N0 = N->getOperand(0);
7489   SDValue N1 = N->getOperand(1);
7490   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7491 }
7492 
7493 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7494   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7495   //      (select lhs, rhs, cc, x, (and x, y))
7496   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7497 }
7498 
7499 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7500                                 const RISCVSubtarget &Subtarget) {
7501   if (Subtarget.hasStdExtZbp()) {
7502     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7503       return GREV;
7504     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7505       return GORC;
7506     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7507       return SHFL;
7508   }
7509 
7510   // fold (or (select cond, 0, y), x) ->
7511   //      (select cond, x, (or x, y))
7512   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7513 }
7514 
7515 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7516   // fold (xor (select cond, 0, y), x) ->
7517   //      (select cond, x, (xor x, y))
7518   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7519 }
7520 
7521 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7522 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7523 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7524 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7525 // ADDW/SUBW/MULW.
7526 static SDValue performANY_EXTENDCombine(SDNode *N,
7527                                         TargetLowering::DAGCombinerInfo &DCI,
7528                                         const RISCVSubtarget &Subtarget) {
7529   if (!Subtarget.is64Bit())
7530     return SDValue();
7531 
7532   SelectionDAG &DAG = DCI.DAG;
7533 
7534   SDValue Src = N->getOperand(0);
7535   EVT VT = N->getValueType(0);
7536   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7537     return SDValue();
7538 
7539   // The opcode must be one that can implicitly sign_extend.
7540   // FIXME: Additional opcodes.
7541   switch (Src.getOpcode()) {
7542   default:
7543     return SDValue();
7544   case ISD::MUL:
7545     if (!Subtarget.hasStdExtM())
7546       return SDValue();
7547     LLVM_FALLTHROUGH;
7548   case ISD::ADD:
7549   case ISD::SUB:
7550     break;
7551   }
7552 
7553   // Only handle cases where the result is used by a CopyToReg. That likely
7554   // means the value is a liveout of the basic block. This helps prevent
7555   // infinite combine loops like PR51206.
7556   if (none_of(N->uses(),
7557               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7558     return SDValue();
7559 
7560   SmallVector<SDNode *, 4> SetCCs;
7561   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7562                             UE = Src.getNode()->use_end();
7563        UI != UE; ++UI) {
7564     SDNode *User = *UI;
7565     if (User == N)
7566       continue;
7567     if (UI.getUse().getResNo() != Src.getResNo())
7568       continue;
7569     // All i32 setccs are legalized by sign extending operands.
7570     if (User->getOpcode() == ISD::SETCC) {
7571       SetCCs.push_back(User);
7572       continue;
7573     }
7574     // We don't know if we can extend this user.
7575     break;
7576   }
7577 
7578   // If we don't have any SetCCs, this isn't worthwhile.
7579   if (SetCCs.empty())
7580     return SDValue();
7581 
7582   SDLoc DL(N);
7583   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7584   DCI.CombineTo(N, SExt);
7585 
7586   // Promote all the setccs.
7587   for (SDNode *SetCC : SetCCs) {
7588     SmallVector<SDValue, 4> Ops;
7589 
7590     for (unsigned j = 0; j != 2; ++j) {
7591       SDValue SOp = SetCC->getOperand(j);
7592       if (SOp == Src)
7593         Ops.push_back(SExt);
7594       else
7595         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7596     }
7597 
7598     Ops.push_back(SetCC->getOperand(2));
7599     DCI.CombineTo(SetCC,
7600                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7601   }
7602   return SDValue(N, 0);
7603 }
7604 
7605 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7606 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7607 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7608                                              bool Commute = false) {
7609   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7610           N->getOpcode() == RISCVISD::SUB_VL) &&
7611          "Unexpected opcode");
7612   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7613   SDValue Op0 = N->getOperand(0);
7614   SDValue Op1 = N->getOperand(1);
7615   if (Commute)
7616     std::swap(Op0, Op1);
7617 
7618   MVT VT = N->getSimpleValueType(0);
7619 
7620   // Determine the narrow size for a widening add/sub.
7621   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7622   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7623                                   VT.getVectorElementCount());
7624 
7625   SDValue Mask = N->getOperand(2);
7626   SDValue VL = N->getOperand(3);
7627 
7628   SDLoc DL(N);
7629 
7630   // If the RHS is a sext or zext, we can form a widening op.
7631   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7632        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7633       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7634     unsigned ExtOpc = Op1.getOpcode();
7635     Op1 = Op1.getOperand(0);
7636     // Re-introduce narrower extends if needed.
7637     if (Op1.getValueType() != NarrowVT)
7638       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7639 
7640     unsigned WOpc;
7641     if (ExtOpc == RISCVISD::VSEXT_VL)
7642       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7643     else
7644       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7645 
7646     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7647   }
7648 
7649   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7650   // sext/zext?
7651 
7652   return SDValue();
7653 }
7654 
7655 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7656 // vwsub(u).vv/vx.
7657 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7658   SDValue Op0 = N->getOperand(0);
7659   SDValue Op1 = N->getOperand(1);
7660   SDValue Mask = N->getOperand(2);
7661   SDValue VL = N->getOperand(3);
7662 
7663   MVT VT = N->getSimpleValueType(0);
7664   MVT NarrowVT = Op1.getSimpleValueType();
7665   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7666 
7667   unsigned VOpc;
7668   switch (N->getOpcode()) {
7669   default: llvm_unreachable("Unexpected opcode");
7670   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7671   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7672   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7673   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7674   }
7675 
7676   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7677                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7678 
7679   SDLoc DL(N);
7680 
7681   // If the LHS is a sext or zext, we can narrow this op to the same size as
7682   // the RHS.
7683   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7684        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7685       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7686     unsigned ExtOpc = Op0.getOpcode();
7687     Op0 = Op0.getOperand(0);
7688     // Re-introduce narrower extends if needed.
7689     if (Op0.getValueType() != NarrowVT)
7690       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7691     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7692   }
7693 
7694   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7695                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7696 
7697   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7698   // to commute and use a vwadd(u).vx instead.
7699   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7700       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
7701     Op0 = Op0.getOperand(1);
7702 
7703     // See if have enough sign bits or zero bits in the scalar to use a
7704     // widening add/sub by splatting to smaller element size.
7705     unsigned EltBits = VT.getScalarSizeInBits();
7706     unsigned ScalarBits = Op0.getValueSizeInBits();
7707     // Make sure we're getting all element bits from the scalar register.
7708     // FIXME: Support implicit sign extension of vmv.v.x?
7709     if (ScalarBits < EltBits)
7710       return SDValue();
7711 
7712     if (IsSigned) {
7713       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7714         return SDValue();
7715     } else {
7716       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7717       if (!DAG.MaskedValueIsZero(Op0, Mask))
7718         return SDValue();
7719     }
7720 
7721     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7722                       DAG.getUNDEF(NarrowVT), Op0, VL);
7723     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7724   }
7725 
7726   return SDValue();
7727 }
7728 
7729 // Try to form VWMUL, VWMULU or VWMULSU.
7730 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7731 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7732                                        bool Commute) {
7733   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7734   SDValue Op0 = N->getOperand(0);
7735   SDValue Op1 = N->getOperand(1);
7736   if (Commute)
7737     std::swap(Op0, Op1);
7738 
7739   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7740   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7741   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7742   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7743     return SDValue();
7744 
7745   SDValue Mask = N->getOperand(2);
7746   SDValue VL = N->getOperand(3);
7747 
7748   // Make sure the mask and VL match.
7749   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7750     return SDValue();
7751 
7752   MVT VT = N->getSimpleValueType(0);
7753 
7754   // Determine the narrow size for a widening multiply.
7755   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7756   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7757                                   VT.getVectorElementCount());
7758 
7759   SDLoc DL(N);
7760 
7761   // See if the other operand is the same opcode.
7762   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7763     if (!Op1.hasOneUse())
7764       return SDValue();
7765 
7766     // Make sure the mask and VL match.
7767     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7768       return SDValue();
7769 
7770     Op1 = Op1.getOperand(0);
7771   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7772     // The operand is a splat of a scalar.
7773 
7774     // The pasthru must be undef for tail agnostic
7775     if (!Op1.getOperand(0).isUndef())
7776       return SDValue();
7777     // The VL must be the same.
7778     if (Op1.getOperand(2) != VL)
7779       return SDValue();
7780 
7781     // Get the scalar value.
7782     Op1 = Op1.getOperand(1);
7783 
7784     // See if have enough sign bits or zero bits in the scalar to use a
7785     // widening multiply by splatting to smaller element size.
7786     unsigned EltBits = VT.getScalarSizeInBits();
7787     unsigned ScalarBits = Op1.getValueSizeInBits();
7788     // Make sure we're getting all element bits from the scalar register.
7789     // FIXME: Support implicit sign extension of vmv.v.x?
7790     if (ScalarBits < EltBits)
7791       return SDValue();
7792 
7793     // If the LHS is a sign extend, try to use vwmul.
7794     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
7795       // Can use vwmul.
7796     } else {
7797       // Otherwise try to use vwmulu or vwmulsu.
7798       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7799       if (DAG.MaskedValueIsZero(Op1, Mask))
7800         IsVWMULSU = IsSignExt;
7801       else
7802         return SDValue();
7803     }
7804 
7805     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7806                       DAG.getUNDEF(NarrowVT), Op1, VL);
7807   } else
7808     return SDValue();
7809 
7810   Op0 = Op0.getOperand(0);
7811 
7812   // Re-introduce narrower extends if needed.
7813   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7814   if (Op0.getValueType() != NarrowVT)
7815     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7816   // vwmulsu requires second operand to be zero extended.
7817   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
7818   if (Op1.getValueType() != NarrowVT)
7819     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7820 
7821   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7822   if (!IsVWMULSU)
7823     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7824   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7825 }
7826 
7827 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7828   switch (Op.getOpcode()) {
7829   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7830   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7831   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7832   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7833   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7834   }
7835 
7836   return RISCVFPRndMode::Invalid;
7837 }
7838 
7839 // Fold
7840 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7841 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7842 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7843 //   (fp_to_int (fceil X))      -> fcvt X, rup
7844 //   (fp_to_int (fround X))     -> fcvt X, rmm
7845 static SDValue performFP_TO_INTCombine(SDNode *N,
7846                                        TargetLowering::DAGCombinerInfo &DCI,
7847                                        const RISCVSubtarget &Subtarget) {
7848   SelectionDAG &DAG = DCI.DAG;
7849   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7850   MVT XLenVT = Subtarget.getXLenVT();
7851 
7852   // Only handle XLen or i32 types. Other types narrower than XLen will
7853   // eventually be legalized to XLenVT.
7854   EVT VT = N->getValueType(0);
7855   if (VT != MVT::i32 && VT != XLenVT)
7856     return SDValue();
7857 
7858   SDValue Src = N->getOperand(0);
7859 
7860   // Ensure the FP type is also legal.
7861   if (!TLI.isTypeLegal(Src.getValueType()))
7862     return SDValue();
7863 
7864   // Don't do this for f16 with Zfhmin and not Zfh.
7865   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7866     return SDValue();
7867 
7868   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7869   if (FRM == RISCVFPRndMode::Invalid)
7870     return SDValue();
7871 
7872   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7873 
7874   unsigned Opc;
7875   if (VT == XLenVT)
7876     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7877   else
7878     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7879 
7880   SDLoc DL(N);
7881   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7882                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7883   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7884 }
7885 
7886 // Fold
7887 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7888 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7889 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7890 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7891 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7892 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7893                                        TargetLowering::DAGCombinerInfo &DCI,
7894                                        const RISCVSubtarget &Subtarget) {
7895   SelectionDAG &DAG = DCI.DAG;
7896   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7897   MVT XLenVT = Subtarget.getXLenVT();
7898 
7899   // Only handle XLen types. Other types narrower than XLen will eventually be
7900   // legalized to XLenVT.
7901   EVT DstVT = N->getValueType(0);
7902   if (DstVT != XLenVT)
7903     return SDValue();
7904 
7905   SDValue Src = N->getOperand(0);
7906 
7907   // Ensure the FP type is also legal.
7908   if (!TLI.isTypeLegal(Src.getValueType()))
7909     return SDValue();
7910 
7911   // Don't do this for f16 with Zfhmin and not Zfh.
7912   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7913     return SDValue();
7914 
7915   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7916 
7917   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7918   if (FRM == RISCVFPRndMode::Invalid)
7919     return SDValue();
7920 
7921   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7922 
7923   unsigned Opc;
7924   if (SatVT == DstVT)
7925     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7926   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7927     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7928   else
7929     return SDValue();
7930   // FIXME: Support other SatVTs by clamping before or after the conversion.
7931 
7932   Src = Src.getOperand(0);
7933 
7934   SDLoc DL(N);
7935   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7936                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7937 
7938   // RISCV FP-to-int conversions saturate to the destination register size, but
7939   // don't produce 0 for nan.
7940   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7941   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7942 }
7943 
7944 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7945                                                DAGCombinerInfo &DCI) const {
7946   SelectionDAG &DAG = DCI.DAG;
7947 
7948   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7949   // bits are demanded. N will be added to the Worklist if it was not deleted.
7950   // Caller should return SDValue(N, 0) if this returns true.
7951   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7952     SDValue Op = N->getOperand(OpNo);
7953     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7954     if (!SimplifyDemandedBits(Op, Mask, DCI))
7955       return false;
7956 
7957     if (N->getOpcode() != ISD::DELETED_NODE)
7958       DCI.AddToWorklist(N);
7959     return true;
7960   };
7961 
7962   switch (N->getOpcode()) {
7963   default:
7964     break;
7965   case RISCVISD::SplitF64: {
7966     SDValue Op0 = N->getOperand(0);
7967     // If the input to SplitF64 is just BuildPairF64 then the operation is
7968     // redundant. Instead, use BuildPairF64's operands directly.
7969     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7970       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7971 
7972     if (Op0->isUndef()) {
7973       SDValue Lo = DAG.getUNDEF(MVT::i32);
7974       SDValue Hi = DAG.getUNDEF(MVT::i32);
7975       return DCI.CombineTo(N, Lo, Hi);
7976     }
7977 
7978     SDLoc DL(N);
7979 
7980     // It's cheaper to materialise two 32-bit integers than to load a double
7981     // from the constant pool and transfer it to integer registers through the
7982     // stack.
7983     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7984       APInt V = C->getValueAPF().bitcastToAPInt();
7985       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7986       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7987       return DCI.CombineTo(N, Lo, Hi);
7988     }
7989 
7990     // This is a target-specific version of a DAGCombine performed in
7991     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7992     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7993     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7994     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7995         !Op0.getNode()->hasOneUse())
7996       break;
7997     SDValue NewSplitF64 =
7998         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7999                     Op0.getOperand(0));
8000     SDValue Lo = NewSplitF64.getValue(0);
8001     SDValue Hi = NewSplitF64.getValue(1);
8002     APInt SignBit = APInt::getSignMask(32);
8003     if (Op0.getOpcode() == ISD::FNEG) {
8004       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8005                                   DAG.getConstant(SignBit, DL, MVT::i32));
8006       return DCI.CombineTo(N, Lo, NewHi);
8007     }
8008     assert(Op0.getOpcode() == ISD::FABS);
8009     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8010                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8011     return DCI.CombineTo(N, Lo, NewHi);
8012   }
8013   case RISCVISD::SLLW:
8014   case RISCVISD::SRAW:
8015   case RISCVISD::SRLW:
8016   case RISCVISD::ROLW:
8017   case RISCVISD::RORW: {
8018     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8019     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8020         SimplifyDemandedLowBitsHelper(1, 5))
8021       return SDValue(N, 0);
8022 
8023     return combineROTR_ROTL_RORW_ROLW(N, DAG);
8024   }
8025   case ISD::ROTR:
8026   case ISD::ROTL:
8027     return combineROTR_ROTL_RORW_ROLW(N, DAG);
8028   case RISCVISD::CLZW:
8029   case RISCVISD::CTZW: {
8030     // Only the lower 32 bits of the first operand are read
8031     if (SimplifyDemandedLowBitsHelper(0, 32))
8032       return SDValue(N, 0);
8033     break;
8034   }
8035   case RISCVISD::GREV:
8036   case RISCVISD::GORC: {
8037     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8038     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8039     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8040     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8041       return SDValue(N, 0);
8042 
8043     return combineGREVI_GORCI(N, DAG);
8044   }
8045   case RISCVISD::GREVW:
8046   case RISCVISD::GORCW: {
8047     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8048     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8049         SimplifyDemandedLowBitsHelper(1, 5))
8050       return SDValue(N, 0);
8051 
8052     return combineGREVI_GORCI(N, DAG);
8053   }
8054   case RISCVISD::SHFL:
8055   case RISCVISD::UNSHFL: {
8056     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8057     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8058     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8059     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8060       return SDValue(N, 0);
8061 
8062     break;
8063   }
8064   case RISCVISD::SHFLW:
8065   case RISCVISD::UNSHFLW: {
8066     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8067     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8068         SimplifyDemandedLowBitsHelper(1, 4))
8069       return SDValue(N, 0);
8070 
8071     break;
8072   }
8073   case RISCVISD::BCOMPRESSW:
8074   case RISCVISD::BDECOMPRESSW: {
8075     // Only the lower 32 bits of LHS and RHS are read.
8076     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8077         SimplifyDemandedLowBitsHelper(1, 32))
8078       return SDValue(N, 0);
8079 
8080     break;
8081   }
8082   case RISCVISD::FMV_X_ANYEXTH:
8083   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8084     SDLoc DL(N);
8085     SDValue Op0 = N->getOperand(0);
8086     MVT VT = N->getSimpleValueType(0);
8087     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8088     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8089     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8090     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8091          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8092         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8093          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8094       assert(Op0.getOperand(0).getValueType() == VT &&
8095              "Unexpected value type!");
8096       return Op0.getOperand(0);
8097     }
8098 
8099     // This is a target-specific version of a DAGCombine performed in
8100     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8101     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8102     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8103     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8104         !Op0.getNode()->hasOneUse())
8105       break;
8106     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8107     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8108     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8109     if (Op0.getOpcode() == ISD::FNEG)
8110       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8111                          DAG.getConstant(SignBit, DL, VT));
8112 
8113     assert(Op0.getOpcode() == ISD::FABS);
8114     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8115                        DAG.getConstant(~SignBit, DL, VT));
8116   }
8117   case ISD::ADD:
8118     return performADDCombine(N, DAG, Subtarget);
8119   case ISD::SUB:
8120     return performSUBCombine(N, DAG);
8121   case ISD::AND:
8122     return performANDCombine(N, DAG);
8123   case ISD::OR:
8124     return performORCombine(N, DAG, Subtarget);
8125   case ISD::XOR:
8126     return performXORCombine(N, DAG);
8127   case ISD::ANY_EXTEND:
8128     return performANY_EXTENDCombine(N, DCI, Subtarget);
8129   case ISD::ZERO_EXTEND:
8130     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8131     // type legalization. This is safe because fp_to_uint produces poison if
8132     // it overflows.
8133     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8134       SDValue Src = N->getOperand(0);
8135       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8136           isTypeLegal(Src.getOperand(0).getValueType()))
8137         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8138                            Src.getOperand(0));
8139       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8140           isTypeLegal(Src.getOperand(1).getValueType())) {
8141         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8142         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8143                                   Src.getOperand(0), Src.getOperand(1));
8144         DCI.CombineTo(N, Res);
8145         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8146         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8147         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8148       }
8149     }
8150     return SDValue();
8151   case RISCVISD::SELECT_CC: {
8152     // Transform
8153     SDValue LHS = N->getOperand(0);
8154     SDValue RHS = N->getOperand(1);
8155     SDValue TrueV = N->getOperand(3);
8156     SDValue FalseV = N->getOperand(4);
8157 
8158     // If the True and False values are the same, we don't need a select_cc.
8159     if (TrueV == FalseV)
8160       return TrueV;
8161 
8162     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8163     if (!ISD::isIntEqualitySetCC(CCVal))
8164       break;
8165 
8166     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8167     //      (select_cc X, Y, lt, trueV, falseV)
8168     // Sometimes the setcc is introduced after select_cc has been formed.
8169     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8170         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8171       // If we're looking for eq 0 instead of ne 0, we need to invert the
8172       // condition.
8173       bool Invert = CCVal == ISD::SETEQ;
8174       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8175       if (Invert)
8176         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8177 
8178       SDLoc DL(N);
8179       RHS = LHS.getOperand(1);
8180       LHS = LHS.getOperand(0);
8181       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8182 
8183       SDValue TargetCC = DAG.getCondCode(CCVal);
8184       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8185                          {LHS, RHS, TargetCC, TrueV, FalseV});
8186     }
8187 
8188     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8189     //      (select_cc X, Y, eq/ne, trueV, falseV)
8190     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8191       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8192                          {LHS.getOperand(0), LHS.getOperand(1),
8193                           N->getOperand(2), TrueV, FalseV});
8194     // (select_cc X, 1, setne, trueV, falseV) ->
8195     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8196     // This can occur when legalizing some floating point comparisons.
8197     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8198     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8199       SDLoc DL(N);
8200       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8201       SDValue TargetCC = DAG.getCondCode(CCVal);
8202       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8203       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8204                          {LHS, RHS, TargetCC, TrueV, FalseV});
8205     }
8206 
8207     break;
8208   }
8209   case RISCVISD::BR_CC: {
8210     SDValue LHS = N->getOperand(1);
8211     SDValue RHS = N->getOperand(2);
8212     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8213     if (!ISD::isIntEqualitySetCC(CCVal))
8214       break;
8215 
8216     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8217     //      (br_cc X, Y, lt, dest)
8218     // Sometimes the setcc is introduced after br_cc has been formed.
8219     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8220         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8221       // If we're looking for eq 0 instead of ne 0, we need to invert the
8222       // condition.
8223       bool Invert = CCVal == ISD::SETEQ;
8224       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8225       if (Invert)
8226         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8227 
8228       SDLoc DL(N);
8229       RHS = LHS.getOperand(1);
8230       LHS = LHS.getOperand(0);
8231       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8232 
8233       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8234                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8235                          N->getOperand(4));
8236     }
8237 
8238     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8239     //      (br_cc X, Y, eq/ne, trueV, falseV)
8240     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8241       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8242                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8243                          N->getOperand(3), N->getOperand(4));
8244 
8245     // (br_cc X, 1, setne, br_cc) ->
8246     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8247     // This can occur when legalizing some floating point comparisons.
8248     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8249     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8250       SDLoc DL(N);
8251       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8252       SDValue TargetCC = DAG.getCondCode(CCVal);
8253       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8254       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8255                          N->getOperand(0), LHS, RHS, TargetCC,
8256                          N->getOperand(4));
8257     }
8258     break;
8259   }
8260   case ISD::FP_TO_SINT:
8261   case ISD::FP_TO_UINT:
8262     return performFP_TO_INTCombine(N, DCI, Subtarget);
8263   case ISD::FP_TO_SINT_SAT:
8264   case ISD::FP_TO_UINT_SAT:
8265     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8266   case ISD::FCOPYSIGN: {
8267     EVT VT = N->getValueType(0);
8268     if (!VT.isVector())
8269       break;
8270     // There is a form of VFSGNJ which injects the negated sign of its second
8271     // operand. Try and bubble any FNEG up after the extend/round to produce
8272     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8273     // TRUNC=1.
8274     SDValue In2 = N->getOperand(1);
8275     // Avoid cases where the extend/round has multiple uses, as duplicating
8276     // those is typically more expensive than removing a fneg.
8277     if (!In2.hasOneUse())
8278       break;
8279     if (In2.getOpcode() != ISD::FP_EXTEND &&
8280         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8281       break;
8282     In2 = In2.getOperand(0);
8283     if (In2.getOpcode() != ISD::FNEG)
8284       break;
8285     SDLoc DL(N);
8286     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8287     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8288                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8289   }
8290   case ISD::MGATHER:
8291   case ISD::MSCATTER:
8292   case ISD::VP_GATHER:
8293   case ISD::VP_SCATTER: {
8294     if (!DCI.isBeforeLegalize())
8295       break;
8296     SDValue Index, ScaleOp;
8297     bool IsIndexScaled = false;
8298     bool IsIndexSigned = false;
8299     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8300       Index = VPGSN->getIndex();
8301       ScaleOp = VPGSN->getScale();
8302       IsIndexScaled = VPGSN->isIndexScaled();
8303       IsIndexSigned = VPGSN->isIndexSigned();
8304     } else {
8305       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8306       Index = MGSN->getIndex();
8307       ScaleOp = MGSN->getScale();
8308       IsIndexScaled = MGSN->isIndexScaled();
8309       IsIndexSigned = MGSN->isIndexSigned();
8310     }
8311     EVT IndexVT = Index.getValueType();
8312     MVT XLenVT = Subtarget.getXLenVT();
8313     // RISCV indexed loads only support the "unsigned unscaled" addressing
8314     // mode, so anything else must be manually legalized.
8315     bool NeedsIdxLegalization =
8316         IsIndexScaled ||
8317         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8318     if (!NeedsIdxLegalization)
8319       break;
8320 
8321     SDLoc DL(N);
8322 
8323     // Any index legalization should first promote to XLenVT, so we don't lose
8324     // bits when scaling. This may create an illegal index type so we let
8325     // LLVM's legalization take care of the splitting.
8326     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8327     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8328       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8329       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8330                           DL, IndexVT, Index);
8331     }
8332 
8333     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8334     if (IsIndexScaled && Scale != 1) {
8335       // Manually scale the indices by the element size.
8336       // TODO: Sanitize the scale operand here?
8337       // TODO: For VP nodes, should we use VP_SHL here?
8338       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8339       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8340       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8341     }
8342 
8343     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8344     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8345       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8346                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8347                               VPGN->getScale(), VPGN->getMask(),
8348                               VPGN->getVectorLength()},
8349                              VPGN->getMemOperand(), NewIndexTy);
8350     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8351       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8352                               {VPSN->getChain(), VPSN->getValue(),
8353                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8354                                VPSN->getMask(), VPSN->getVectorLength()},
8355                               VPSN->getMemOperand(), NewIndexTy);
8356     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8357       return DAG.getMaskedGather(
8358           N->getVTList(), MGN->getMemoryVT(), DL,
8359           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8360            MGN->getBasePtr(), Index, MGN->getScale()},
8361           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8362     const auto *MSN = cast<MaskedScatterSDNode>(N);
8363     return DAG.getMaskedScatter(
8364         N->getVTList(), MSN->getMemoryVT(), DL,
8365         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8366          Index, MSN->getScale()},
8367         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8368   }
8369   case RISCVISD::SRA_VL:
8370   case RISCVISD::SRL_VL:
8371   case RISCVISD::SHL_VL: {
8372     SDValue ShAmt = N->getOperand(1);
8373     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8374       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8375       SDLoc DL(N);
8376       SDValue VL = N->getOperand(3);
8377       EVT VT = N->getValueType(0);
8378       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8379                           ShAmt.getOperand(1), VL);
8380       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8381                          N->getOperand(2), N->getOperand(3));
8382     }
8383     break;
8384   }
8385   case ISD::SRA:
8386   case ISD::SRL:
8387   case ISD::SHL: {
8388     SDValue ShAmt = N->getOperand(1);
8389     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8390       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8391       SDLoc DL(N);
8392       EVT VT = N->getValueType(0);
8393       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8394                           ShAmt.getOperand(1),
8395                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8396       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8397     }
8398     break;
8399   }
8400   case RISCVISD::ADD_VL:
8401     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8402       return V;
8403     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8404   case RISCVISD::SUB_VL:
8405     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8406   case RISCVISD::VWADD_W_VL:
8407   case RISCVISD::VWADDU_W_VL:
8408   case RISCVISD::VWSUB_W_VL:
8409   case RISCVISD::VWSUBU_W_VL:
8410     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8411   case RISCVISD::MUL_VL:
8412     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8413       return V;
8414     // Mul is commutative.
8415     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8416   case ISD::STORE: {
8417     auto *Store = cast<StoreSDNode>(N);
8418     SDValue Val = Store->getValue();
8419     // Combine store of vmv.x.s to vse with VL of 1.
8420     // FIXME: Support FP.
8421     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8422       SDValue Src = Val.getOperand(0);
8423       EVT VecVT = Src.getValueType();
8424       EVT MemVT = Store->getMemoryVT();
8425       // The memory VT and the element type must match.
8426       if (VecVT.getVectorElementType() == MemVT) {
8427         SDLoc DL(N);
8428         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8429         return DAG.getStoreVP(
8430             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8431             DAG.getConstant(1, DL, MaskVT),
8432             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8433             Store->getMemOperand(), Store->getAddressingMode(),
8434             Store->isTruncatingStore(), /*IsCompress*/ false);
8435       }
8436     }
8437 
8438     break;
8439   }
8440   case ISD::SPLAT_VECTOR: {
8441     EVT VT = N->getValueType(0);
8442     // Only perform this combine on legal MVT types.
8443     if (!isTypeLegal(VT))
8444       break;
8445     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8446                                          DAG, Subtarget))
8447       return Gather;
8448     break;
8449   }
8450   case RISCVISD::VMV_V_X_VL: {
8451     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8452     // scalar input.
8453     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8454     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8455     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8456       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8457         return SDValue(N, 0);
8458 
8459     break;
8460   }
8461   }
8462 
8463   return SDValue();
8464 }
8465 
8466 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8467     const SDNode *N, CombineLevel Level) const {
8468   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8469   // materialised in fewer instructions than `(OP _, c1)`:
8470   //
8471   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8472   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8473   SDValue N0 = N->getOperand(0);
8474   EVT Ty = N0.getValueType();
8475   if (Ty.isScalarInteger() &&
8476       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8477     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8478     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8479     if (C1 && C2) {
8480       const APInt &C1Int = C1->getAPIntValue();
8481       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8482 
8483       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8484       // and the combine should happen, to potentially allow further combines
8485       // later.
8486       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8487           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8488         return true;
8489 
8490       // We can materialise `c1` in an add immediate, so it's "free", and the
8491       // combine should be prevented.
8492       if (C1Int.getMinSignedBits() <= 64 &&
8493           isLegalAddImmediate(C1Int.getSExtValue()))
8494         return false;
8495 
8496       // Neither constant will fit into an immediate, so find materialisation
8497       // costs.
8498       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8499                                               Subtarget.getFeatureBits(),
8500                                               /*CompressionCost*/true);
8501       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8502           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8503           /*CompressionCost*/true);
8504 
8505       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8506       // combine should be prevented.
8507       if (C1Cost < ShiftedC1Cost)
8508         return false;
8509     }
8510   }
8511   return true;
8512 }
8513 
8514 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8515     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8516     TargetLoweringOpt &TLO) const {
8517   // Delay this optimization as late as possible.
8518   if (!TLO.LegalOps)
8519     return false;
8520 
8521   EVT VT = Op.getValueType();
8522   if (VT.isVector())
8523     return false;
8524 
8525   // Only handle AND for now.
8526   if (Op.getOpcode() != ISD::AND)
8527     return false;
8528 
8529   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8530   if (!C)
8531     return false;
8532 
8533   const APInt &Mask = C->getAPIntValue();
8534 
8535   // Clear all non-demanded bits initially.
8536   APInt ShrunkMask = Mask & DemandedBits;
8537 
8538   // Try to make a smaller immediate by setting undemanded bits.
8539 
8540   APInt ExpandedMask = Mask | ~DemandedBits;
8541 
8542   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8543     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8544   };
8545   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8546     if (NewMask == Mask)
8547       return true;
8548     SDLoc DL(Op);
8549     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8550     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8551     return TLO.CombineTo(Op, NewOp);
8552   };
8553 
8554   // If the shrunk mask fits in sign extended 12 bits, let the target
8555   // independent code apply it.
8556   if (ShrunkMask.isSignedIntN(12))
8557     return false;
8558 
8559   // Preserve (and X, 0xffff) when zext.h is supported.
8560   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8561     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8562     if (IsLegalMask(NewMask))
8563       return UseMask(NewMask);
8564   }
8565 
8566   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8567   if (VT == MVT::i64) {
8568     APInt NewMask = APInt(64, 0xffffffff);
8569     if (IsLegalMask(NewMask))
8570       return UseMask(NewMask);
8571   }
8572 
8573   // For the remaining optimizations, we need to be able to make a negative
8574   // number through a combination of mask and undemanded bits.
8575   if (!ExpandedMask.isNegative())
8576     return false;
8577 
8578   // What is the fewest number of bits we need to represent the negative number.
8579   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8580 
8581   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8582   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8583   APInt NewMask = ShrunkMask;
8584   if (MinSignedBits <= 12)
8585     NewMask.setBitsFrom(11);
8586   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8587     NewMask.setBitsFrom(31);
8588   else
8589     return false;
8590 
8591   // Check that our new mask is a subset of the demanded mask.
8592   assert(IsLegalMask(NewMask));
8593   return UseMask(NewMask);
8594 }
8595 
8596 static void computeGREV(APInt &Src, unsigned ShAmt) {
8597   ShAmt &= Src.getBitWidth() - 1;
8598   uint64_t x = Src.getZExtValue();
8599   if (ShAmt & 1)
8600     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8601   if (ShAmt & 2)
8602     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8603   if (ShAmt & 4)
8604     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8605   if (ShAmt & 8)
8606     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8607   if (ShAmt & 16)
8608     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8609   if (ShAmt & 32)
8610     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8611   Src = x;
8612 }
8613 
8614 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8615                                                         KnownBits &Known,
8616                                                         const APInt &DemandedElts,
8617                                                         const SelectionDAG &DAG,
8618                                                         unsigned Depth) const {
8619   unsigned BitWidth = Known.getBitWidth();
8620   unsigned Opc = Op.getOpcode();
8621   assert((Opc >= ISD::BUILTIN_OP_END ||
8622           Opc == ISD::INTRINSIC_WO_CHAIN ||
8623           Opc == ISD::INTRINSIC_W_CHAIN ||
8624           Opc == ISD::INTRINSIC_VOID) &&
8625          "Should use MaskedValueIsZero if you don't know whether Op"
8626          " is a target node!");
8627 
8628   Known.resetAll();
8629   switch (Opc) {
8630   default: break;
8631   case RISCVISD::SELECT_CC: {
8632     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8633     // If we don't know any bits, early out.
8634     if (Known.isUnknown())
8635       break;
8636     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8637 
8638     // Only known if known in both the LHS and RHS.
8639     Known = KnownBits::commonBits(Known, Known2);
8640     break;
8641   }
8642   case RISCVISD::REMUW: {
8643     KnownBits Known2;
8644     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8645     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8646     // We only care about the lower 32 bits.
8647     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8648     // Restore the original width by sign extending.
8649     Known = Known.sext(BitWidth);
8650     break;
8651   }
8652   case RISCVISD::DIVUW: {
8653     KnownBits Known2;
8654     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8655     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8656     // We only care about the lower 32 bits.
8657     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8658     // Restore the original width by sign extending.
8659     Known = Known.sext(BitWidth);
8660     break;
8661   }
8662   case RISCVISD::CTZW: {
8663     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8664     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8665     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8666     Known.Zero.setBitsFrom(LowBits);
8667     break;
8668   }
8669   case RISCVISD::CLZW: {
8670     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8671     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8672     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8673     Known.Zero.setBitsFrom(LowBits);
8674     break;
8675   }
8676   case RISCVISD::GREV:
8677   case RISCVISD::GREVW: {
8678     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8679       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8680       if (Opc == RISCVISD::GREVW)
8681         Known = Known.trunc(32);
8682       unsigned ShAmt = C->getZExtValue();
8683       computeGREV(Known.Zero, ShAmt);
8684       computeGREV(Known.One, ShAmt);
8685       if (Opc == RISCVISD::GREVW)
8686         Known = Known.sext(BitWidth);
8687     }
8688     break;
8689   }
8690   case RISCVISD::READ_VLENB: {
8691     // If we know the minimum VLen from Zvl extensions, we can use that to
8692     // determine the trailing zeros of VLENB.
8693     // FIXME: Limit to 128 bit vectors until we have more testing.
8694     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8695     if (MinVLenB > 0)
8696       Known.Zero.setLowBits(Log2_32(MinVLenB));
8697     // We assume VLENB is no more than 65536 / 8 bytes.
8698     Known.Zero.setBitsFrom(14);
8699     break;
8700   }
8701   case ISD::INTRINSIC_W_CHAIN:
8702   case ISD::INTRINSIC_WO_CHAIN: {
8703     unsigned IntNo =
8704         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8705     switch (IntNo) {
8706     default:
8707       // We can't do anything for most intrinsics.
8708       break;
8709     case Intrinsic::riscv_vsetvli:
8710     case Intrinsic::riscv_vsetvlimax:
8711     case Intrinsic::riscv_vsetvli_opt:
8712     case Intrinsic::riscv_vsetvlimax_opt:
8713       // Assume that VL output is positive and would fit in an int32_t.
8714       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8715       if (BitWidth >= 32)
8716         Known.Zero.setBitsFrom(31);
8717       break;
8718     }
8719     break;
8720   }
8721   }
8722 }
8723 
8724 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8725     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8726     unsigned Depth) const {
8727   switch (Op.getOpcode()) {
8728   default:
8729     break;
8730   case RISCVISD::SELECT_CC: {
8731     unsigned Tmp =
8732         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8733     if (Tmp == 1) return 1;  // Early out.
8734     unsigned Tmp2 =
8735         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8736     return std::min(Tmp, Tmp2);
8737   }
8738   case RISCVISD::SLLW:
8739   case RISCVISD::SRAW:
8740   case RISCVISD::SRLW:
8741   case RISCVISD::DIVW:
8742   case RISCVISD::DIVUW:
8743   case RISCVISD::REMUW:
8744   case RISCVISD::ROLW:
8745   case RISCVISD::RORW:
8746   case RISCVISD::GREVW:
8747   case RISCVISD::GORCW:
8748   case RISCVISD::FSLW:
8749   case RISCVISD::FSRW:
8750   case RISCVISD::SHFLW:
8751   case RISCVISD::UNSHFLW:
8752   case RISCVISD::BCOMPRESSW:
8753   case RISCVISD::BDECOMPRESSW:
8754   case RISCVISD::BFPW:
8755   case RISCVISD::FCVT_W_RV64:
8756   case RISCVISD::FCVT_WU_RV64:
8757   case RISCVISD::STRICT_FCVT_W_RV64:
8758   case RISCVISD::STRICT_FCVT_WU_RV64:
8759     // TODO: As the result is sign-extended, this is conservatively correct. A
8760     // more precise answer could be calculated for SRAW depending on known
8761     // bits in the shift amount.
8762     return 33;
8763   case RISCVISD::SHFL:
8764   case RISCVISD::UNSHFL: {
8765     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8766     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8767     // will stay within the upper 32 bits. If there were more than 32 sign bits
8768     // before there will be at least 33 sign bits after.
8769     if (Op.getValueType() == MVT::i64 &&
8770         isa<ConstantSDNode>(Op.getOperand(1)) &&
8771         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8772       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8773       if (Tmp > 32)
8774         return 33;
8775     }
8776     break;
8777   }
8778   case RISCVISD::VMV_X_S: {
8779     // The number of sign bits of the scalar result is computed by obtaining the
8780     // element type of the input vector operand, subtracting its width from the
8781     // XLEN, and then adding one (sign bit within the element type). If the
8782     // element type is wider than XLen, the least-significant XLEN bits are
8783     // taken.
8784     unsigned XLen = Subtarget.getXLen();
8785     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
8786     if (EltBits <= XLen)
8787       return XLen - EltBits + 1;
8788     break;
8789   }
8790   }
8791 
8792   return 1;
8793 }
8794 
8795 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8796                                                   MachineBasicBlock *BB) {
8797   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8798 
8799   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8800   // Should the count have wrapped while it was being read, we need to try
8801   // again.
8802   // ...
8803   // read:
8804   // rdcycleh x3 # load high word of cycle
8805   // rdcycle  x2 # load low word of cycle
8806   // rdcycleh x4 # load high word of cycle
8807   // bne x3, x4, read # check if high word reads match, otherwise try again
8808   // ...
8809 
8810   MachineFunction &MF = *BB->getParent();
8811   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8812   MachineFunction::iterator It = ++BB->getIterator();
8813 
8814   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8815   MF.insert(It, LoopMBB);
8816 
8817   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8818   MF.insert(It, DoneMBB);
8819 
8820   // Transfer the remainder of BB and its successor edges to DoneMBB.
8821   DoneMBB->splice(DoneMBB->begin(), BB,
8822                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8823   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8824 
8825   BB->addSuccessor(LoopMBB);
8826 
8827   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8828   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8829   Register LoReg = MI.getOperand(0).getReg();
8830   Register HiReg = MI.getOperand(1).getReg();
8831   DebugLoc DL = MI.getDebugLoc();
8832 
8833   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8834   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8835       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8836       .addReg(RISCV::X0);
8837   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8838       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8839       .addReg(RISCV::X0);
8840   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8841       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8842       .addReg(RISCV::X0);
8843 
8844   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8845       .addReg(HiReg)
8846       .addReg(ReadAgainReg)
8847       .addMBB(LoopMBB);
8848 
8849   LoopMBB->addSuccessor(LoopMBB);
8850   LoopMBB->addSuccessor(DoneMBB);
8851 
8852   MI.eraseFromParent();
8853 
8854   return DoneMBB;
8855 }
8856 
8857 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8858                                              MachineBasicBlock *BB) {
8859   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8860 
8861   MachineFunction &MF = *BB->getParent();
8862   DebugLoc DL = MI.getDebugLoc();
8863   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8864   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8865   Register LoReg = MI.getOperand(0).getReg();
8866   Register HiReg = MI.getOperand(1).getReg();
8867   Register SrcReg = MI.getOperand(2).getReg();
8868   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8869   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8870 
8871   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8872                           RI);
8873   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8874   MachineMemOperand *MMOLo =
8875       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8876   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8877       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8878   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8879       .addFrameIndex(FI)
8880       .addImm(0)
8881       .addMemOperand(MMOLo);
8882   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8883       .addFrameIndex(FI)
8884       .addImm(4)
8885       .addMemOperand(MMOHi);
8886   MI.eraseFromParent(); // The pseudo instruction is gone now.
8887   return BB;
8888 }
8889 
8890 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8891                                                  MachineBasicBlock *BB) {
8892   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8893          "Unexpected instruction");
8894 
8895   MachineFunction &MF = *BB->getParent();
8896   DebugLoc DL = MI.getDebugLoc();
8897   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8898   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8899   Register DstReg = MI.getOperand(0).getReg();
8900   Register LoReg = MI.getOperand(1).getReg();
8901   Register HiReg = MI.getOperand(2).getReg();
8902   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8903   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8904 
8905   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8906   MachineMemOperand *MMOLo =
8907       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8908   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8909       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8910   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8911       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8912       .addFrameIndex(FI)
8913       .addImm(0)
8914       .addMemOperand(MMOLo);
8915   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8916       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8917       .addFrameIndex(FI)
8918       .addImm(4)
8919       .addMemOperand(MMOHi);
8920   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8921   MI.eraseFromParent(); // The pseudo instruction is gone now.
8922   return BB;
8923 }
8924 
8925 static bool isSelectPseudo(MachineInstr &MI) {
8926   switch (MI.getOpcode()) {
8927   default:
8928     return false;
8929   case RISCV::Select_GPR_Using_CC_GPR:
8930   case RISCV::Select_FPR16_Using_CC_GPR:
8931   case RISCV::Select_FPR32_Using_CC_GPR:
8932   case RISCV::Select_FPR64_Using_CC_GPR:
8933     return true;
8934   }
8935 }
8936 
8937 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8938                                         unsigned RelOpcode, unsigned EqOpcode,
8939                                         const RISCVSubtarget &Subtarget) {
8940   DebugLoc DL = MI.getDebugLoc();
8941   Register DstReg = MI.getOperand(0).getReg();
8942   Register Src1Reg = MI.getOperand(1).getReg();
8943   Register Src2Reg = MI.getOperand(2).getReg();
8944   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8945   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8946   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8947 
8948   // Save the current FFLAGS.
8949   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8950 
8951   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8952                  .addReg(Src1Reg)
8953                  .addReg(Src2Reg);
8954   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8955     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8956 
8957   // Restore the FFLAGS.
8958   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8959       .addReg(SavedFFlags, RegState::Kill);
8960 
8961   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
8962   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8963                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8964                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
8965   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8966     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
8967 
8968   // Erase the pseudoinstruction.
8969   MI.eraseFromParent();
8970   return BB;
8971 }
8972 
8973 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
8974                                            MachineBasicBlock *BB,
8975                                            const RISCVSubtarget &Subtarget) {
8976   // To "insert" Select_* instructions, we actually have to insert the triangle
8977   // control-flow pattern.  The incoming instructions know the destination vreg
8978   // to set, the condition code register to branch on, the true/false values to
8979   // select between, and the condcode to use to select the appropriate branch.
8980   //
8981   // We produce the following control flow:
8982   //     HeadMBB
8983   //     |  \
8984   //     |  IfFalseMBB
8985   //     | /
8986   //    TailMBB
8987   //
8988   // When we find a sequence of selects we attempt to optimize their emission
8989   // by sharing the control flow. Currently we only handle cases where we have
8990   // multiple selects with the exact same condition (same LHS, RHS and CC).
8991   // The selects may be interleaved with other instructions if the other
8992   // instructions meet some requirements we deem safe:
8993   // - They are debug instructions. Otherwise,
8994   // - They do not have side-effects, do not access memory and their inputs do
8995   //   not depend on the results of the select pseudo-instructions.
8996   // The TrueV/FalseV operands of the selects cannot depend on the result of
8997   // previous selects in the sequence.
8998   // These conditions could be further relaxed. See the X86 target for a
8999   // related approach and more information.
9000   Register LHS = MI.getOperand(1).getReg();
9001   Register RHS = MI.getOperand(2).getReg();
9002   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9003 
9004   SmallVector<MachineInstr *, 4> SelectDebugValues;
9005   SmallSet<Register, 4> SelectDests;
9006   SelectDests.insert(MI.getOperand(0).getReg());
9007 
9008   MachineInstr *LastSelectPseudo = &MI;
9009 
9010   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9011        SequenceMBBI != E; ++SequenceMBBI) {
9012     if (SequenceMBBI->isDebugInstr())
9013       continue;
9014     else if (isSelectPseudo(*SequenceMBBI)) {
9015       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9016           SequenceMBBI->getOperand(2).getReg() != RHS ||
9017           SequenceMBBI->getOperand(3).getImm() != CC ||
9018           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9019           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9020         break;
9021       LastSelectPseudo = &*SequenceMBBI;
9022       SequenceMBBI->collectDebugValues(SelectDebugValues);
9023       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9024     } else {
9025       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9026           SequenceMBBI->mayLoadOrStore())
9027         break;
9028       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9029             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9030           }))
9031         break;
9032     }
9033   }
9034 
9035   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9036   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9037   DebugLoc DL = MI.getDebugLoc();
9038   MachineFunction::iterator I = ++BB->getIterator();
9039 
9040   MachineBasicBlock *HeadMBB = BB;
9041   MachineFunction *F = BB->getParent();
9042   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9043   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9044 
9045   F->insert(I, IfFalseMBB);
9046   F->insert(I, TailMBB);
9047 
9048   // Transfer debug instructions associated with the selects to TailMBB.
9049   for (MachineInstr *DebugInstr : SelectDebugValues) {
9050     TailMBB->push_back(DebugInstr->removeFromParent());
9051   }
9052 
9053   // Move all instructions after the sequence to TailMBB.
9054   TailMBB->splice(TailMBB->end(), HeadMBB,
9055                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9056   // Update machine-CFG edges by transferring all successors of the current
9057   // block to the new block which will contain the Phi nodes for the selects.
9058   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9059   // Set the successors for HeadMBB.
9060   HeadMBB->addSuccessor(IfFalseMBB);
9061   HeadMBB->addSuccessor(TailMBB);
9062 
9063   // Insert appropriate branch.
9064   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9065     .addReg(LHS)
9066     .addReg(RHS)
9067     .addMBB(TailMBB);
9068 
9069   // IfFalseMBB just falls through to TailMBB.
9070   IfFalseMBB->addSuccessor(TailMBB);
9071 
9072   // Create PHIs for all of the select pseudo-instructions.
9073   auto SelectMBBI = MI.getIterator();
9074   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9075   auto InsertionPoint = TailMBB->begin();
9076   while (SelectMBBI != SelectEnd) {
9077     auto Next = std::next(SelectMBBI);
9078     if (isSelectPseudo(*SelectMBBI)) {
9079       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9080       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9081               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9082           .addReg(SelectMBBI->getOperand(4).getReg())
9083           .addMBB(HeadMBB)
9084           .addReg(SelectMBBI->getOperand(5).getReg())
9085           .addMBB(IfFalseMBB);
9086       SelectMBBI->eraseFromParent();
9087     }
9088     SelectMBBI = Next;
9089   }
9090 
9091   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9092   return TailMBB;
9093 }
9094 
9095 MachineBasicBlock *
9096 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9097                                                  MachineBasicBlock *BB) const {
9098   switch (MI.getOpcode()) {
9099   default:
9100     llvm_unreachable("Unexpected instr type to insert");
9101   case RISCV::ReadCycleWide:
9102     assert(!Subtarget.is64Bit() &&
9103            "ReadCycleWrite is only to be used on riscv32");
9104     return emitReadCycleWidePseudo(MI, BB);
9105   case RISCV::Select_GPR_Using_CC_GPR:
9106   case RISCV::Select_FPR16_Using_CC_GPR:
9107   case RISCV::Select_FPR32_Using_CC_GPR:
9108   case RISCV::Select_FPR64_Using_CC_GPR:
9109     return emitSelectPseudo(MI, BB, Subtarget);
9110   case RISCV::BuildPairF64Pseudo:
9111     return emitBuildPairF64Pseudo(MI, BB);
9112   case RISCV::SplitF64Pseudo:
9113     return emitSplitF64Pseudo(MI, BB);
9114   case RISCV::PseudoQuietFLE_H:
9115     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9116   case RISCV::PseudoQuietFLT_H:
9117     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9118   case RISCV::PseudoQuietFLE_S:
9119     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9120   case RISCV::PseudoQuietFLT_S:
9121     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9122   case RISCV::PseudoQuietFLE_D:
9123     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9124   case RISCV::PseudoQuietFLT_D:
9125     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9126   }
9127 }
9128 
9129 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9130                                                         SDNode *Node) const {
9131   // Add FRM dependency to any instructions with dynamic rounding mode.
9132   unsigned Opc = MI.getOpcode();
9133   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9134   if (Idx < 0)
9135     return;
9136   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9137     return;
9138   // If the instruction already reads FRM, don't add another read.
9139   if (MI.readsRegister(RISCV::FRM))
9140     return;
9141   MI.addOperand(
9142       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9143 }
9144 
9145 // Calling Convention Implementation.
9146 // The expectations for frontend ABI lowering vary from target to target.
9147 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9148 // details, but this is a longer term goal. For now, we simply try to keep the
9149 // role of the frontend as simple and well-defined as possible. The rules can
9150 // be summarised as:
9151 // * Never split up large scalar arguments. We handle them here.
9152 // * If a hardfloat calling convention is being used, and the struct may be
9153 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9154 // available, then pass as two separate arguments. If either the GPRs or FPRs
9155 // are exhausted, then pass according to the rule below.
9156 // * If a struct could never be passed in registers or directly in a stack
9157 // slot (as it is larger than 2*XLEN and the floating point rules don't
9158 // apply), then pass it using a pointer with the byval attribute.
9159 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9160 // word-sized array or a 2*XLEN scalar (depending on alignment).
9161 // * The frontend can determine whether a struct is returned by reference or
9162 // not based on its size and fields. If it will be returned by reference, the
9163 // frontend must modify the prototype so a pointer with the sret annotation is
9164 // passed as the first argument. This is not necessary for large scalar
9165 // returns.
9166 // * Struct return values and varargs should be coerced to structs containing
9167 // register-size fields in the same situations they would be for fixed
9168 // arguments.
9169 
9170 static const MCPhysReg ArgGPRs[] = {
9171   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9172   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9173 };
9174 static const MCPhysReg ArgFPR16s[] = {
9175   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9176   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9177 };
9178 static const MCPhysReg ArgFPR32s[] = {
9179   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9180   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9181 };
9182 static const MCPhysReg ArgFPR64s[] = {
9183   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9184   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9185 };
9186 // This is an interim calling convention and it may be changed in the future.
9187 static const MCPhysReg ArgVRs[] = {
9188     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9189     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9190     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9191 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9192                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9193                                      RISCV::V20M2, RISCV::V22M2};
9194 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9195                                      RISCV::V20M4};
9196 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9197 
9198 // Pass a 2*XLEN argument that has been split into two XLEN values through
9199 // registers or the stack as necessary.
9200 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9201                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9202                                 MVT ValVT2, MVT LocVT2,
9203                                 ISD::ArgFlagsTy ArgFlags2) {
9204   unsigned XLenInBytes = XLen / 8;
9205   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9206     // At least one half can be passed via register.
9207     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9208                                      VA1.getLocVT(), CCValAssign::Full));
9209   } else {
9210     // Both halves must be passed on the stack, with proper alignment.
9211     Align StackAlign =
9212         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9213     State.addLoc(
9214         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9215                             State.AllocateStack(XLenInBytes, StackAlign),
9216                             VA1.getLocVT(), CCValAssign::Full));
9217     State.addLoc(CCValAssign::getMem(
9218         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9219         LocVT2, CCValAssign::Full));
9220     return false;
9221   }
9222 
9223   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9224     // The second half can also be passed via register.
9225     State.addLoc(
9226         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9227   } else {
9228     // The second half is passed via the stack, without additional alignment.
9229     State.addLoc(CCValAssign::getMem(
9230         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9231         LocVT2, CCValAssign::Full));
9232   }
9233 
9234   return false;
9235 }
9236 
9237 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9238                                Optional<unsigned> FirstMaskArgument,
9239                                CCState &State, const RISCVTargetLowering &TLI) {
9240   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9241   if (RC == &RISCV::VRRegClass) {
9242     // Assign the first mask argument to V0.
9243     // This is an interim calling convention and it may be changed in the
9244     // future.
9245     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9246       return State.AllocateReg(RISCV::V0);
9247     return State.AllocateReg(ArgVRs);
9248   }
9249   if (RC == &RISCV::VRM2RegClass)
9250     return State.AllocateReg(ArgVRM2s);
9251   if (RC == &RISCV::VRM4RegClass)
9252     return State.AllocateReg(ArgVRM4s);
9253   if (RC == &RISCV::VRM8RegClass)
9254     return State.AllocateReg(ArgVRM8s);
9255   llvm_unreachable("Unhandled register class for ValueType");
9256 }
9257 
9258 // Implements the RISC-V calling convention. Returns true upon failure.
9259 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9260                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9261                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9262                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9263                      Optional<unsigned> FirstMaskArgument) {
9264   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9265   assert(XLen == 32 || XLen == 64);
9266   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9267 
9268   // Any return value split in to more than two values can't be returned
9269   // directly. Vectors are returned via the available vector registers.
9270   if (!LocVT.isVector() && IsRet && ValNo > 1)
9271     return true;
9272 
9273   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9274   // variadic argument, or if no F16/F32 argument registers are available.
9275   bool UseGPRForF16_F32 = true;
9276   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9277   // variadic argument, or if no F64 argument registers are available.
9278   bool UseGPRForF64 = true;
9279 
9280   switch (ABI) {
9281   default:
9282     llvm_unreachable("Unexpected ABI");
9283   case RISCVABI::ABI_ILP32:
9284   case RISCVABI::ABI_LP64:
9285     break;
9286   case RISCVABI::ABI_ILP32F:
9287   case RISCVABI::ABI_LP64F:
9288     UseGPRForF16_F32 = !IsFixed;
9289     break;
9290   case RISCVABI::ABI_ILP32D:
9291   case RISCVABI::ABI_LP64D:
9292     UseGPRForF16_F32 = !IsFixed;
9293     UseGPRForF64 = !IsFixed;
9294     break;
9295   }
9296 
9297   // FPR16, FPR32, and FPR64 alias each other.
9298   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9299     UseGPRForF16_F32 = true;
9300     UseGPRForF64 = true;
9301   }
9302 
9303   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9304   // similar local variables rather than directly checking against the target
9305   // ABI.
9306 
9307   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9308     LocVT = XLenVT;
9309     LocInfo = CCValAssign::BCvt;
9310   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9311     LocVT = MVT::i64;
9312     LocInfo = CCValAssign::BCvt;
9313   }
9314 
9315   // If this is a variadic argument, the RISC-V calling convention requires
9316   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9317   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9318   // be used regardless of whether the original argument was split during
9319   // legalisation or not. The argument will not be passed by registers if the
9320   // original type is larger than 2*XLEN, so the register alignment rule does
9321   // not apply.
9322   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9323   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9324       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9325     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9326     // Skip 'odd' register if necessary.
9327     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9328       State.AllocateReg(ArgGPRs);
9329   }
9330 
9331   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9332   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9333       State.getPendingArgFlags();
9334 
9335   assert(PendingLocs.size() == PendingArgFlags.size() &&
9336          "PendingLocs and PendingArgFlags out of sync");
9337 
9338   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9339   // registers are exhausted.
9340   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9341     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9342            "Can't lower f64 if it is split");
9343     // Depending on available argument GPRS, f64 may be passed in a pair of
9344     // GPRs, split between a GPR and the stack, or passed completely on the
9345     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9346     // cases.
9347     Register Reg = State.AllocateReg(ArgGPRs);
9348     LocVT = MVT::i32;
9349     if (!Reg) {
9350       unsigned StackOffset = State.AllocateStack(8, Align(8));
9351       State.addLoc(
9352           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9353       return false;
9354     }
9355     if (!State.AllocateReg(ArgGPRs))
9356       State.AllocateStack(4, Align(4));
9357     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9358     return false;
9359   }
9360 
9361   // Fixed-length vectors are located in the corresponding scalable-vector
9362   // container types.
9363   if (ValVT.isFixedLengthVector())
9364     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9365 
9366   // Split arguments might be passed indirectly, so keep track of the pending
9367   // values. Split vectors are passed via a mix of registers and indirectly, so
9368   // treat them as we would any other argument.
9369   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9370     LocVT = XLenVT;
9371     LocInfo = CCValAssign::Indirect;
9372     PendingLocs.push_back(
9373         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9374     PendingArgFlags.push_back(ArgFlags);
9375     if (!ArgFlags.isSplitEnd()) {
9376       return false;
9377     }
9378   }
9379 
9380   // If the split argument only had two elements, it should be passed directly
9381   // in registers or on the stack.
9382   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9383       PendingLocs.size() <= 2) {
9384     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9385     // Apply the normal calling convention rules to the first half of the
9386     // split argument.
9387     CCValAssign VA = PendingLocs[0];
9388     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9389     PendingLocs.clear();
9390     PendingArgFlags.clear();
9391     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9392                                ArgFlags);
9393   }
9394 
9395   // Allocate to a register if possible, or else a stack slot.
9396   Register Reg;
9397   unsigned StoreSizeBytes = XLen / 8;
9398   Align StackAlign = Align(XLen / 8);
9399 
9400   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9401     Reg = State.AllocateReg(ArgFPR16s);
9402   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9403     Reg = State.AllocateReg(ArgFPR32s);
9404   else if (ValVT == MVT::f64 && !UseGPRForF64)
9405     Reg = State.AllocateReg(ArgFPR64s);
9406   else if (ValVT.isVector()) {
9407     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9408     if (!Reg) {
9409       // For return values, the vector must be passed fully via registers or
9410       // via the stack.
9411       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9412       // but we're using all of them.
9413       if (IsRet)
9414         return true;
9415       // Try using a GPR to pass the address
9416       if ((Reg = State.AllocateReg(ArgGPRs))) {
9417         LocVT = XLenVT;
9418         LocInfo = CCValAssign::Indirect;
9419       } else if (ValVT.isScalableVector()) {
9420         LocVT = XLenVT;
9421         LocInfo = CCValAssign::Indirect;
9422       } else {
9423         // Pass fixed-length vectors on the stack.
9424         LocVT = ValVT;
9425         StoreSizeBytes = ValVT.getStoreSize();
9426         // Align vectors to their element sizes, being careful for vXi1
9427         // vectors.
9428         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9429       }
9430     }
9431   } else {
9432     Reg = State.AllocateReg(ArgGPRs);
9433   }
9434 
9435   unsigned StackOffset =
9436       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9437 
9438   // If we reach this point and PendingLocs is non-empty, we must be at the
9439   // end of a split argument that must be passed indirectly.
9440   if (!PendingLocs.empty()) {
9441     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9442     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9443 
9444     for (auto &It : PendingLocs) {
9445       if (Reg)
9446         It.convertToReg(Reg);
9447       else
9448         It.convertToMem(StackOffset);
9449       State.addLoc(It);
9450     }
9451     PendingLocs.clear();
9452     PendingArgFlags.clear();
9453     return false;
9454   }
9455 
9456   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9457           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9458          "Expected an XLenVT or vector types at this stage");
9459 
9460   if (Reg) {
9461     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9462     return false;
9463   }
9464 
9465   // When a floating-point value is passed on the stack, no bit-conversion is
9466   // needed.
9467   if (ValVT.isFloatingPoint()) {
9468     LocVT = ValVT;
9469     LocInfo = CCValAssign::Full;
9470   }
9471   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9472   return false;
9473 }
9474 
9475 template <typename ArgTy>
9476 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9477   for (const auto &ArgIdx : enumerate(Args)) {
9478     MVT ArgVT = ArgIdx.value().VT;
9479     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9480       return ArgIdx.index();
9481   }
9482   return None;
9483 }
9484 
9485 void RISCVTargetLowering::analyzeInputArgs(
9486     MachineFunction &MF, CCState &CCInfo,
9487     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9488     RISCVCCAssignFn Fn) const {
9489   unsigned NumArgs = Ins.size();
9490   FunctionType *FType = MF.getFunction().getFunctionType();
9491 
9492   Optional<unsigned> FirstMaskArgument;
9493   if (Subtarget.hasVInstructions())
9494     FirstMaskArgument = preAssignMask(Ins);
9495 
9496   for (unsigned i = 0; i != NumArgs; ++i) {
9497     MVT ArgVT = Ins[i].VT;
9498     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9499 
9500     Type *ArgTy = nullptr;
9501     if (IsRet)
9502       ArgTy = FType->getReturnType();
9503     else if (Ins[i].isOrigArg())
9504       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9505 
9506     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9507     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9508            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9509            FirstMaskArgument)) {
9510       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9511                         << EVT(ArgVT).getEVTString() << '\n');
9512       llvm_unreachable(nullptr);
9513     }
9514   }
9515 }
9516 
9517 void RISCVTargetLowering::analyzeOutputArgs(
9518     MachineFunction &MF, CCState &CCInfo,
9519     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9520     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9521   unsigned NumArgs = Outs.size();
9522 
9523   Optional<unsigned> FirstMaskArgument;
9524   if (Subtarget.hasVInstructions())
9525     FirstMaskArgument = preAssignMask(Outs);
9526 
9527   for (unsigned i = 0; i != NumArgs; i++) {
9528     MVT ArgVT = Outs[i].VT;
9529     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9530     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9531 
9532     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9533     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9534            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9535            FirstMaskArgument)) {
9536       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9537                         << EVT(ArgVT).getEVTString() << "\n");
9538       llvm_unreachable(nullptr);
9539     }
9540   }
9541 }
9542 
9543 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9544 // values.
9545 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9546                                    const CCValAssign &VA, const SDLoc &DL,
9547                                    const RISCVSubtarget &Subtarget) {
9548   switch (VA.getLocInfo()) {
9549   default:
9550     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9551   case CCValAssign::Full:
9552     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9553       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9554     break;
9555   case CCValAssign::BCvt:
9556     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9557       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9558     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9559       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9560     else
9561       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9562     break;
9563   }
9564   return Val;
9565 }
9566 
9567 // The caller is responsible for loading the full value if the argument is
9568 // passed with CCValAssign::Indirect.
9569 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9570                                 const CCValAssign &VA, const SDLoc &DL,
9571                                 const RISCVTargetLowering &TLI) {
9572   MachineFunction &MF = DAG.getMachineFunction();
9573   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9574   EVT LocVT = VA.getLocVT();
9575   SDValue Val;
9576   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9577   Register VReg = RegInfo.createVirtualRegister(RC);
9578   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9579   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9580 
9581   if (VA.getLocInfo() == CCValAssign::Indirect)
9582     return Val;
9583 
9584   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9585 }
9586 
9587 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9588                                    const CCValAssign &VA, const SDLoc &DL,
9589                                    const RISCVSubtarget &Subtarget) {
9590   EVT LocVT = VA.getLocVT();
9591 
9592   switch (VA.getLocInfo()) {
9593   default:
9594     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9595   case CCValAssign::Full:
9596     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9597       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9598     break;
9599   case CCValAssign::BCvt:
9600     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9601       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9602     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9603       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9604     else
9605       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9606     break;
9607   }
9608   return Val;
9609 }
9610 
9611 // The caller is responsible for loading the full value if the argument is
9612 // passed with CCValAssign::Indirect.
9613 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9614                                 const CCValAssign &VA, const SDLoc &DL) {
9615   MachineFunction &MF = DAG.getMachineFunction();
9616   MachineFrameInfo &MFI = MF.getFrameInfo();
9617   EVT LocVT = VA.getLocVT();
9618   EVT ValVT = VA.getValVT();
9619   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9620   if (ValVT.isScalableVector()) {
9621     // When the value is a scalable vector, we save the pointer which points to
9622     // the scalable vector value in the stack. The ValVT will be the pointer
9623     // type, instead of the scalable vector type.
9624     ValVT = LocVT;
9625   }
9626   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9627                                  /*IsImmutable=*/true);
9628   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9629   SDValue Val;
9630 
9631   ISD::LoadExtType ExtType;
9632   switch (VA.getLocInfo()) {
9633   default:
9634     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9635   case CCValAssign::Full:
9636   case CCValAssign::Indirect:
9637   case CCValAssign::BCvt:
9638     ExtType = ISD::NON_EXTLOAD;
9639     break;
9640   }
9641   Val = DAG.getExtLoad(
9642       ExtType, DL, LocVT, Chain, FIN,
9643       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9644   return Val;
9645 }
9646 
9647 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9648                                        const CCValAssign &VA, const SDLoc &DL) {
9649   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9650          "Unexpected VA");
9651   MachineFunction &MF = DAG.getMachineFunction();
9652   MachineFrameInfo &MFI = MF.getFrameInfo();
9653   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9654 
9655   if (VA.isMemLoc()) {
9656     // f64 is passed on the stack.
9657     int FI =
9658         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9659     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9660     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9661                        MachinePointerInfo::getFixedStack(MF, FI));
9662   }
9663 
9664   assert(VA.isRegLoc() && "Expected register VA assignment");
9665 
9666   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9667   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9668   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9669   SDValue Hi;
9670   if (VA.getLocReg() == RISCV::X17) {
9671     // Second half of f64 is passed on the stack.
9672     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9673     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9674     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9675                      MachinePointerInfo::getFixedStack(MF, FI));
9676   } else {
9677     // Second half of f64 is passed in another GPR.
9678     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9679     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9680     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9681   }
9682   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9683 }
9684 
9685 // FastCC has less than 1% performance improvement for some particular
9686 // benchmark. But theoretically, it may has benenfit for some cases.
9687 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9688                             unsigned ValNo, MVT ValVT, MVT LocVT,
9689                             CCValAssign::LocInfo LocInfo,
9690                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9691                             bool IsFixed, bool IsRet, Type *OrigTy,
9692                             const RISCVTargetLowering &TLI,
9693                             Optional<unsigned> FirstMaskArgument) {
9694 
9695   // X5 and X6 might be used for save-restore libcall.
9696   static const MCPhysReg GPRList[] = {
9697       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9698       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9699       RISCV::X29, RISCV::X30, RISCV::X31};
9700 
9701   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9702     if (unsigned Reg = State.AllocateReg(GPRList)) {
9703       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9704       return false;
9705     }
9706   }
9707 
9708   if (LocVT == MVT::f16) {
9709     static const MCPhysReg FPR16List[] = {
9710         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9711         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9712         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9713         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9714     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9715       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9716       return false;
9717     }
9718   }
9719 
9720   if (LocVT == MVT::f32) {
9721     static const MCPhysReg FPR32List[] = {
9722         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9723         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9724         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9725         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9726     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9727       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9728       return false;
9729     }
9730   }
9731 
9732   if (LocVT == MVT::f64) {
9733     static const MCPhysReg FPR64List[] = {
9734         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9735         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9736         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9737         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9738     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9739       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9740       return false;
9741     }
9742   }
9743 
9744   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9745     unsigned Offset4 = State.AllocateStack(4, Align(4));
9746     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9747     return false;
9748   }
9749 
9750   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9751     unsigned Offset5 = State.AllocateStack(8, Align(8));
9752     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9753     return false;
9754   }
9755 
9756   if (LocVT.isVector()) {
9757     if (unsigned Reg =
9758             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9759       // Fixed-length vectors are located in the corresponding scalable-vector
9760       // container types.
9761       if (ValVT.isFixedLengthVector())
9762         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9763       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9764     } else {
9765       // Try and pass the address via a "fast" GPR.
9766       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9767         LocInfo = CCValAssign::Indirect;
9768         LocVT = TLI.getSubtarget().getXLenVT();
9769         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9770       } else if (ValVT.isFixedLengthVector()) {
9771         auto StackAlign =
9772             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9773         unsigned StackOffset =
9774             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9775         State.addLoc(
9776             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9777       } else {
9778         // Can't pass scalable vectors on the stack.
9779         return true;
9780       }
9781     }
9782 
9783     return false;
9784   }
9785 
9786   return true; // CC didn't match.
9787 }
9788 
9789 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9790                          CCValAssign::LocInfo LocInfo,
9791                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9792 
9793   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9794     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9795     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9796     static const MCPhysReg GPRList[] = {
9797         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9798         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9799     if (unsigned Reg = State.AllocateReg(GPRList)) {
9800       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9801       return false;
9802     }
9803   }
9804 
9805   if (LocVT == MVT::f32) {
9806     // Pass in STG registers: F1, ..., F6
9807     //                        fs0 ... fs5
9808     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9809                                           RISCV::F18_F, RISCV::F19_F,
9810                                           RISCV::F20_F, RISCV::F21_F};
9811     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9812       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9813       return false;
9814     }
9815   }
9816 
9817   if (LocVT == MVT::f64) {
9818     // Pass in STG registers: D1, ..., D6
9819     //                        fs6 ... fs11
9820     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9821                                           RISCV::F24_D, RISCV::F25_D,
9822                                           RISCV::F26_D, RISCV::F27_D};
9823     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9824       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9825       return false;
9826     }
9827   }
9828 
9829   report_fatal_error("No registers left in GHC calling convention");
9830   return true;
9831 }
9832 
9833 // Transform physical registers into virtual registers.
9834 SDValue RISCVTargetLowering::LowerFormalArguments(
9835     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9836     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9837     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9838 
9839   MachineFunction &MF = DAG.getMachineFunction();
9840 
9841   switch (CallConv) {
9842   default:
9843     report_fatal_error("Unsupported calling convention");
9844   case CallingConv::C:
9845   case CallingConv::Fast:
9846     break;
9847   case CallingConv::GHC:
9848     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9849         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9850       report_fatal_error(
9851         "GHC calling convention requires the F and D instruction set extensions");
9852   }
9853 
9854   const Function &Func = MF.getFunction();
9855   if (Func.hasFnAttribute("interrupt")) {
9856     if (!Func.arg_empty())
9857       report_fatal_error(
9858         "Functions with the interrupt attribute cannot have arguments!");
9859 
9860     StringRef Kind =
9861       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9862 
9863     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9864       report_fatal_error(
9865         "Function interrupt attribute argument not supported!");
9866   }
9867 
9868   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9869   MVT XLenVT = Subtarget.getXLenVT();
9870   unsigned XLenInBytes = Subtarget.getXLen() / 8;
9871   // Used with vargs to acumulate store chains.
9872   std::vector<SDValue> OutChains;
9873 
9874   // Assign locations to all of the incoming arguments.
9875   SmallVector<CCValAssign, 16> ArgLocs;
9876   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9877 
9878   if (CallConv == CallingConv::GHC)
9879     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9880   else
9881     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9882                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9883                                                    : CC_RISCV);
9884 
9885   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9886     CCValAssign &VA = ArgLocs[i];
9887     SDValue ArgValue;
9888     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9889     // case.
9890     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9891       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9892     else if (VA.isRegLoc())
9893       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9894     else
9895       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9896 
9897     if (VA.getLocInfo() == CCValAssign::Indirect) {
9898       // If the original argument was split and passed by reference (e.g. i128
9899       // on RV32), we need to load all parts of it here (using the same
9900       // address). Vectors may be partly split to registers and partly to the
9901       // stack, in which case the base address is partly offset and subsequent
9902       // stores are relative to that.
9903       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9904                                    MachinePointerInfo()));
9905       unsigned ArgIndex = Ins[i].OrigArgIndex;
9906       unsigned ArgPartOffset = Ins[i].PartOffset;
9907       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9908       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9909         CCValAssign &PartVA = ArgLocs[i + 1];
9910         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9911         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9912         if (PartVA.getValVT().isScalableVector())
9913           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9914         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9915         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9916                                      MachinePointerInfo()));
9917         ++i;
9918       }
9919       continue;
9920     }
9921     InVals.push_back(ArgValue);
9922   }
9923 
9924   if (IsVarArg) {
9925     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9926     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9927     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9928     MachineFrameInfo &MFI = MF.getFrameInfo();
9929     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9930     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9931 
9932     // Offset of the first variable argument from stack pointer, and size of
9933     // the vararg save area. For now, the varargs save area is either zero or
9934     // large enough to hold a0-a7.
9935     int VaArgOffset, VarArgsSaveSize;
9936 
9937     // If all registers are allocated, then all varargs must be passed on the
9938     // stack and we don't need to save any argregs.
9939     if (ArgRegs.size() == Idx) {
9940       VaArgOffset = CCInfo.getNextStackOffset();
9941       VarArgsSaveSize = 0;
9942     } else {
9943       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9944       VaArgOffset = -VarArgsSaveSize;
9945     }
9946 
9947     // Record the frame index of the first variable argument
9948     // which is a value necessary to VASTART.
9949     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9950     RVFI->setVarArgsFrameIndex(FI);
9951 
9952     // If saving an odd number of registers then create an extra stack slot to
9953     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
9954     // offsets to even-numbered registered remain 2*XLEN-aligned.
9955     if (Idx % 2) {
9956       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9957       VarArgsSaveSize += XLenInBytes;
9958     }
9959 
9960     // Copy the integer registers that may have been used for passing varargs
9961     // to the vararg save area.
9962     for (unsigned I = Idx; I < ArgRegs.size();
9963          ++I, VaArgOffset += XLenInBytes) {
9964       const Register Reg = RegInfo.createVirtualRegister(RC);
9965       RegInfo.addLiveIn(ArgRegs[I], Reg);
9966       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
9967       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9968       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9969       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
9970                                    MachinePointerInfo::getFixedStack(MF, FI));
9971       cast<StoreSDNode>(Store.getNode())
9972           ->getMemOperand()
9973           ->setValue((Value *)nullptr);
9974       OutChains.push_back(Store);
9975     }
9976     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
9977   }
9978 
9979   // All stores are grouped in one node to allow the matching between
9980   // the size of Ins and InVals. This only happens for vararg functions.
9981   if (!OutChains.empty()) {
9982     OutChains.push_back(Chain);
9983     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
9984   }
9985 
9986   return Chain;
9987 }
9988 
9989 /// isEligibleForTailCallOptimization - Check whether the call is eligible
9990 /// for tail call optimization.
9991 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9992 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9993     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9994     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9995 
9996   auto &Callee = CLI.Callee;
9997   auto CalleeCC = CLI.CallConv;
9998   auto &Outs = CLI.Outs;
9999   auto &Caller = MF.getFunction();
10000   auto CallerCC = Caller.getCallingConv();
10001 
10002   // Exception-handling functions need a special set of instructions to
10003   // indicate a return to the hardware. Tail-calling another function would
10004   // probably break this.
10005   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10006   // should be expanded as new function attributes are introduced.
10007   if (Caller.hasFnAttribute("interrupt"))
10008     return false;
10009 
10010   // Do not tail call opt if the stack is used to pass parameters.
10011   if (CCInfo.getNextStackOffset() != 0)
10012     return false;
10013 
10014   // Do not tail call opt if any parameters need to be passed indirectly.
10015   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10016   // passed indirectly. So the address of the value will be passed in a
10017   // register, or if not available, then the address is put on the stack. In
10018   // order to pass indirectly, space on the stack often needs to be allocated
10019   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10020   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10021   // are passed CCValAssign::Indirect.
10022   for (auto &VA : ArgLocs)
10023     if (VA.getLocInfo() == CCValAssign::Indirect)
10024       return false;
10025 
10026   // Do not tail call opt if either caller or callee uses struct return
10027   // semantics.
10028   auto IsCallerStructRet = Caller.hasStructRetAttr();
10029   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10030   if (IsCallerStructRet || IsCalleeStructRet)
10031     return false;
10032 
10033   // Externally-defined functions with weak linkage should not be
10034   // tail-called. The behaviour of branch instructions in this situation (as
10035   // used for tail calls) is implementation-defined, so we cannot rely on the
10036   // linker replacing the tail call with a return.
10037   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10038     const GlobalValue *GV = G->getGlobal();
10039     if (GV->hasExternalWeakLinkage())
10040       return false;
10041   }
10042 
10043   // The callee has to preserve all registers the caller needs to preserve.
10044   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10045   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10046   if (CalleeCC != CallerCC) {
10047     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10048     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10049       return false;
10050   }
10051 
10052   // Byval parameters hand the function a pointer directly into the stack area
10053   // we want to reuse during a tail call. Working around this *is* possible
10054   // but less efficient and uglier in LowerCall.
10055   for (auto &Arg : Outs)
10056     if (Arg.Flags.isByVal())
10057       return false;
10058 
10059   return true;
10060 }
10061 
10062 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10063   return DAG.getDataLayout().getPrefTypeAlign(
10064       VT.getTypeForEVT(*DAG.getContext()));
10065 }
10066 
10067 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10068 // and output parameter nodes.
10069 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10070                                        SmallVectorImpl<SDValue> &InVals) const {
10071   SelectionDAG &DAG = CLI.DAG;
10072   SDLoc &DL = CLI.DL;
10073   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10074   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10075   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10076   SDValue Chain = CLI.Chain;
10077   SDValue Callee = CLI.Callee;
10078   bool &IsTailCall = CLI.IsTailCall;
10079   CallingConv::ID CallConv = CLI.CallConv;
10080   bool IsVarArg = CLI.IsVarArg;
10081   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10082   MVT XLenVT = Subtarget.getXLenVT();
10083 
10084   MachineFunction &MF = DAG.getMachineFunction();
10085 
10086   // Analyze the operands of the call, assigning locations to each operand.
10087   SmallVector<CCValAssign, 16> ArgLocs;
10088   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10089 
10090   if (CallConv == CallingConv::GHC)
10091     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10092   else
10093     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10094                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10095                                                     : CC_RISCV);
10096 
10097   // Check if it's really possible to do a tail call.
10098   if (IsTailCall)
10099     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10100 
10101   if (IsTailCall)
10102     ++NumTailCalls;
10103   else if (CLI.CB && CLI.CB->isMustTailCall())
10104     report_fatal_error("failed to perform tail call elimination on a call "
10105                        "site marked musttail");
10106 
10107   // Get a count of how many bytes are to be pushed on the stack.
10108   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10109 
10110   // Create local copies for byval args
10111   SmallVector<SDValue, 8> ByValArgs;
10112   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10113     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10114     if (!Flags.isByVal())
10115       continue;
10116 
10117     SDValue Arg = OutVals[i];
10118     unsigned Size = Flags.getByValSize();
10119     Align Alignment = Flags.getNonZeroByValAlign();
10120 
10121     int FI =
10122         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10123     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10124     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10125 
10126     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10127                           /*IsVolatile=*/false,
10128                           /*AlwaysInline=*/false, IsTailCall,
10129                           MachinePointerInfo(), MachinePointerInfo());
10130     ByValArgs.push_back(FIPtr);
10131   }
10132 
10133   if (!IsTailCall)
10134     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10135 
10136   // Copy argument values to their designated locations.
10137   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10138   SmallVector<SDValue, 8> MemOpChains;
10139   SDValue StackPtr;
10140   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10141     CCValAssign &VA = ArgLocs[i];
10142     SDValue ArgValue = OutVals[i];
10143     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10144 
10145     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10146     bool IsF64OnRV32DSoftABI =
10147         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10148     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10149       SDValue SplitF64 = DAG.getNode(
10150           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10151       SDValue Lo = SplitF64.getValue(0);
10152       SDValue Hi = SplitF64.getValue(1);
10153 
10154       Register RegLo = VA.getLocReg();
10155       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10156 
10157       if (RegLo == RISCV::X17) {
10158         // Second half of f64 is passed on the stack.
10159         // Work out the address of the stack slot.
10160         if (!StackPtr.getNode())
10161           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10162         // Emit the store.
10163         MemOpChains.push_back(
10164             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10165       } else {
10166         // Second half of f64 is passed in another GPR.
10167         assert(RegLo < RISCV::X31 && "Invalid register pair");
10168         Register RegHigh = RegLo + 1;
10169         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10170       }
10171       continue;
10172     }
10173 
10174     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10175     // as any other MemLoc.
10176 
10177     // Promote the value if needed.
10178     // For now, only handle fully promoted and indirect arguments.
10179     if (VA.getLocInfo() == CCValAssign::Indirect) {
10180       // Store the argument in a stack slot and pass its address.
10181       Align StackAlign =
10182           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10183                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10184       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10185       // If the original argument was split (e.g. i128), we need
10186       // to store the required parts of it here (and pass just one address).
10187       // Vectors may be partly split to registers and partly to the stack, in
10188       // which case the base address is partly offset and subsequent stores are
10189       // relative to that.
10190       unsigned ArgIndex = Outs[i].OrigArgIndex;
10191       unsigned ArgPartOffset = Outs[i].PartOffset;
10192       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10193       // Calculate the total size to store. We don't have access to what we're
10194       // actually storing other than performing the loop and collecting the
10195       // info.
10196       SmallVector<std::pair<SDValue, SDValue>> Parts;
10197       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10198         SDValue PartValue = OutVals[i + 1];
10199         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10200         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10201         EVT PartVT = PartValue.getValueType();
10202         if (PartVT.isScalableVector())
10203           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10204         StoredSize += PartVT.getStoreSize();
10205         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10206         Parts.push_back(std::make_pair(PartValue, Offset));
10207         ++i;
10208       }
10209       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10210       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10211       MemOpChains.push_back(
10212           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10213                        MachinePointerInfo::getFixedStack(MF, FI)));
10214       for (const auto &Part : Parts) {
10215         SDValue PartValue = Part.first;
10216         SDValue PartOffset = Part.second;
10217         SDValue Address =
10218             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10219         MemOpChains.push_back(
10220             DAG.getStore(Chain, DL, PartValue, Address,
10221                          MachinePointerInfo::getFixedStack(MF, FI)));
10222       }
10223       ArgValue = SpillSlot;
10224     } else {
10225       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10226     }
10227 
10228     // Use local copy if it is a byval arg.
10229     if (Flags.isByVal())
10230       ArgValue = ByValArgs[j++];
10231 
10232     if (VA.isRegLoc()) {
10233       // Queue up the argument copies and emit them at the end.
10234       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10235     } else {
10236       assert(VA.isMemLoc() && "Argument not register or memory");
10237       assert(!IsTailCall && "Tail call not allowed if stack is used "
10238                             "for passing parameters");
10239 
10240       // Work out the address of the stack slot.
10241       if (!StackPtr.getNode())
10242         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10243       SDValue Address =
10244           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10245                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10246 
10247       // Emit the store.
10248       MemOpChains.push_back(
10249           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10250     }
10251   }
10252 
10253   // Join the stores, which are independent of one another.
10254   if (!MemOpChains.empty())
10255     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10256 
10257   SDValue Glue;
10258 
10259   // Build a sequence of copy-to-reg nodes, chained and glued together.
10260   for (auto &Reg : RegsToPass) {
10261     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10262     Glue = Chain.getValue(1);
10263   }
10264 
10265   // Validate that none of the argument registers have been marked as
10266   // reserved, if so report an error. Do the same for the return address if this
10267   // is not a tailcall.
10268   validateCCReservedRegs(RegsToPass, MF);
10269   if (!IsTailCall &&
10270       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10271     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10272         MF.getFunction(),
10273         "Return address register required, but has been reserved."});
10274 
10275   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10276   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10277   // split it and then direct call can be matched by PseudoCALL.
10278   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10279     const GlobalValue *GV = S->getGlobal();
10280 
10281     unsigned OpFlags = RISCVII::MO_CALL;
10282     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10283       OpFlags = RISCVII::MO_PLT;
10284 
10285     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10286   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10287     unsigned OpFlags = RISCVII::MO_CALL;
10288 
10289     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10290                                                  nullptr))
10291       OpFlags = RISCVII::MO_PLT;
10292 
10293     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10294   }
10295 
10296   // The first call operand is the chain and the second is the target address.
10297   SmallVector<SDValue, 8> Ops;
10298   Ops.push_back(Chain);
10299   Ops.push_back(Callee);
10300 
10301   // Add argument registers to the end of the list so that they are
10302   // known live into the call.
10303   for (auto &Reg : RegsToPass)
10304     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10305 
10306   if (!IsTailCall) {
10307     // Add a register mask operand representing the call-preserved registers.
10308     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10309     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10310     assert(Mask && "Missing call preserved mask for calling convention");
10311     Ops.push_back(DAG.getRegisterMask(Mask));
10312   }
10313 
10314   // Glue the call to the argument copies, if any.
10315   if (Glue.getNode())
10316     Ops.push_back(Glue);
10317 
10318   // Emit the call.
10319   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10320 
10321   if (IsTailCall) {
10322     MF.getFrameInfo().setHasTailCall();
10323     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10324   }
10325 
10326   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10327   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10328   Glue = Chain.getValue(1);
10329 
10330   // Mark the end of the call, which is glued to the call itself.
10331   Chain = DAG.getCALLSEQ_END(Chain,
10332                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10333                              DAG.getConstant(0, DL, PtrVT, true),
10334                              Glue, DL);
10335   Glue = Chain.getValue(1);
10336 
10337   // Assign locations to each value returned by this call.
10338   SmallVector<CCValAssign, 16> RVLocs;
10339   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10340   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10341 
10342   // Copy all of the result registers out of their specified physreg.
10343   for (auto &VA : RVLocs) {
10344     // Copy the value out
10345     SDValue RetValue =
10346         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10347     // Glue the RetValue to the end of the call sequence
10348     Chain = RetValue.getValue(1);
10349     Glue = RetValue.getValue(2);
10350 
10351     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10352       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10353       SDValue RetValue2 =
10354           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10355       Chain = RetValue2.getValue(1);
10356       Glue = RetValue2.getValue(2);
10357       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10358                              RetValue2);
10359     }
10360 
10361     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10362 
10363     InVals.push_back(RetValue);
10364   }
10365 
10366   return Chain;
10367 }
10368 
10369 bool RISCVTargetLowering::CanLowerReturn(
10370     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10371     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10372   SmallVector<CCValAssign, 16> RVLocs;
10373   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10374 
10375   Optional<unsigned> FirstMaskArgument;
10376   if (Subtarget.hasVInstructions())
10377     FirstMaskArgument = preAssignMask(Outs);
10378 
10379   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10380     MVT VT = Outs[i].VT;
10381     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10382     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10383     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10384                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10385                  *this, FirstMaskArgument))
10386       return false;
10387   }
10388   return true;
10389 }
10390 
10391 SDValue
10392 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10393                                  bool IsVarArg,
10394                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10395                                  const SmallVectorImpl<SDValue> &OutVals,
10396                                  const SDLoc &DL, SelectionDAG &DAG) const {
10397   const MachineFunction &MF = DAG.getMachineFunction();
10398   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10399 
10400   // Stores the assignment of the return value to a location.
10401   SmallVector<CCValAssign, 16> RVLocs;
10402 
10403   // Info about the registers and stack slot.
10404   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10405                  *DAG.getContext());
10406 
10407   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10408                     nullptr, CC_RISCV);
10409 
10410   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10411     report_fatal_error("GHC functions return void only");
10412 
10413   SDValue Glue;
10414   SmallVector<SDValue, 4> RetOps(1, Chain);
10415 
10416   // Copy the result values into the output registers.
10417   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10418     SDValue Val = OutVals[i];
10419     CCValAssign &VA = RVLocs[i];
10420     assert(VA.isRegLoc() && "Can only return in registers!");
10421 
10422     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10423       // Handle returning f64 on RV32D with a soft float ABI.
10424       assert(VA.isRegLoc() && "Expected return via registers");
10425       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10426                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10427       SDValue Lo = SplitF64.getValue(0);
10428       SDValue Hi = SplitF64.getValue(1);
10429       Register RegLo = VA.getLocReg();
10430       assert(RegLo < RISCV::X31 && "Invalid register pair");
10431       Register RegHi = RegLo + 1;
10432 
10433       if (STI.isRegisterReservedByUser(RegLo) ||
10434           STI.isRegisterReservedByUser(RegHi))
10435         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10436             MF.getFunction(),
10437             "Return value register required, but has been reserved."});
10438 
10439       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10440       Glue = Chain.getValue(1);
10441       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10442       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10443       Glue = Chain.getValue(1);
10444       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10445     } else {
10446       // Handle a 'normal' return.
10447       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10448       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10449 
10450       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10451         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10452             MF.getFunction(),
10453             "Return value register required, but has been reserved."});
10454 
10455       // Guarantee that all emitted copies are stuck together.
10456       Glue = Chain.getValue(1);
10457       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10458     }
10459   }
10460 
10461   RetOps[0] = Chain; // Update chain.
10462 
10463   // Add the glue node if we have it.
10464   if (Glue.getNode()) {
10465     RetOps.push_back(Glue);
10466   }
10467 
10468   unsigned RetOpc = RISCVISD::RET_FLAG;
10469   // Interrupt service routines use different return instructions.
10470   const Function &Func = DAG.getMachineFunction().getFunction();
10471   if (Func.hasFnAttribute("interrupt")) {
10472     if (!Func.getReturnType()->isVoidTy())
10473       report_fatal_error(
10474           "Functions with the interrupt attribute must have void return type!");
10475 
10476     MachineFunction &MF = DAG.getMachineFunction();
10477     StringRef Kind =
10478       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10479 
10480     if (Kind == "user")
10481       RetOpc = RISCVISD::URET_FLAG;
10482     else if (Kind == "supervisor")
10483       RetOpc = RISCVISD::SRET_FLAG;
10484     else
10485       RetOpc = RISCVISD::MRET_FLAG;
10486   }
10487 
10488   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10489 }
10490 
10491 void RISCVTargetLowering::validateCCReservedRegs(
10492     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10493     MachineFunction &MF) const {
10494   const Function &F = MF.getFunction();
10495   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10496 
10497   if (llvm::any_of(Regs, [&STI](auto Reg) {
10498         return STI.isRegisterReservedByUser(Reg.first);
10499       }))
10500     F.getContext().diagnose(DiagnosticInfoUnsupported{
10501         F, "Argument register required, but has been reserved."});
10502 }
10503 
10504 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10505   return CI->isTailCall();
10506 }
10507 
10508 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10509 #define NODE_NAME_CASE(NODE)                                                   \
10510   case RISCVISD::NODE:                                                         \
10511     return "RISCVISD::" #NODE;
10512   // clang-format off
10513   switch ((RISCVISD::NodeType)Opcode) {
10514   case RISCVISD::FIRST_NUMBER:
10515     break;
10516   NODE_NAME_CASE(RET_FLAG)
10517   NODE_NAME_CASE(URET_FLAG)
10518   NODE_NAME_CASE(SRET_FLAG)
10519   NODE_NAME_CASE(MRET_FLAG)
10520   NODE_NAME_CASE(CALL)
10521   NODE_NAME_CASE(SELECT_CC)
10522   NODE_NAME_CASE(BR_CC)
10523   NODE_NAME_CASE(BuildPairF64)
10524   NODE_NAME_CASE(SplitF64)
10525   NODE_NAME_CASE(TAIL)
10526   NODE_NAME_CASE(MULHSU)
10527   NODE_NAME_CASE(SLLW)
10528   NODE_NAME_CASE(SRAW)
10529   NODE_NAME_CASE(SRLW)
10530   NODE_NAME_CASE(DIVW)
10531   NODE_NAME_CASE(DIVUW)
10532   NODE_NAME_CASE(REMUW)
10533   NODE_NAME_CASE(ROLW)
10534   NODE_NAME_CASE(RORW)
10535   NODE_NAME_CASE(CLZW)
10536   NODE_NAME_CASE(CTZW)
10537   NODE_NAME_CASE(FSLW)
10538   NODE_NAME_CASE(FSRW)
10539   NODE_NAME_CASE(FSL)
10540   NODE_NAME_CASE(FSR)
10541   NODE_NAME_CASE(FMV_H_X)
10542   NODE_NAME_CASE(FMV_X_ANYEXTH)
10543   NODE_NAME_CASE(FMV_W_X_RV64)
10544   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10545   NODE_NAME_CASE(FCVT_X)
10546   NODE_NAME_CASE(FCVT_XU)
10547   NODE_NAME_CASE(FCVT_W_RV64)
10548   NODE_NAME_CASE(FCVT_WU_RV64)
10549   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10550   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10551   NODE_NAME_CASE(READ_CYCLE_WIDE)
10552   NODE_NAME_CASE(GREV)
10553   NODE_NAME_CASE(GREVW)
10554   NODE_NAME_CASE(GORC)
10555   NODE_NAME_CASE(GORCW)
10556   NODE_NAME_CASE(SHFL)
10557   NODE_NAME_CASE(SHFLW)
10558   NODE_NAME_CASE(UNSHFL)
10559   NODE_NAME_CASE(UNSHFLW)
10560   NODE_NAME_CASE(BFP)
10561   NODE_NAME_CASE(BFPW)
10562   NODE_NAME_CASE(BCOMPRESS)
10563   NODE_NAME_CASE(BCOMPRESSW)
10564   NODE_NAME_CASE(BDECOMPRESS)
10565   NODE_NAME_CASE(BDECOMPRESSW)
10566   NODE_NAME_CASE(VMV_V_X_VL)
10567   NODE_NAME_CASE(VFMV_V_F_VL)
10568   NODE_NAME_CASE(VMV_X_S)
10569   NODE_NAME_CASE(VMV_S_X_VL)
10570   NODE_NAME_CASE(VFMV_S_F_VL)
10571   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10572   NODE_NAME_CASE(READ_VLENB)
10573   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10574   NODE_NAME_CASE(VSLIDEUP_VL)
10575   NODE_NAME_CASE(VSLIDE1UP_VL)
10576   NODE_NAME_CASE(VSLIDEDOWN_VL)
10577   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10578   NODE_NAME_CASE(VID_VL)
10579   NODE_NAME_CASE(VFNCVT_ROD_VL)
10580   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10581   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10582   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10583   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10584   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10585   NODE_NAME_CASE(VECREDUCE_AND_VL)
10586   NODE_NAME_CASE(VECREDUCE_OR_VL)
10587   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10588   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10589   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10590   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10591   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10592   NODE_NAME_CASE(ADD_VL)
10593   NODE_NAME_CASE(AND_VL)
10594   NODE_NAME_CASE(MUL_VL)
10595   NODE_NAME_CASE(OR_VL)
10596   NODE_NAME_CASE(SDIV_VL)
10597   NODE_NAME_CASE(SHL_VL)
10598   NODE_NAME_CASE(SREM_VL)
10599   NODE_NAME_CASE(SRA_VL)
10600   NODE_NAME_CASE(SRL_VL)
10601   NODE_NAME_CASE(SUB_VL)
10602   NODE_NAME_CASE(UDIV_VL)
10603   NODE_NAME_CASE(UREM_VL)
10604   NODE_NAME_CASE(XOR_VL)
10605   NODE_NAME_CASE(SADDSAT_VL)
10606   NODE_NAME_CASE(UADDSAT_VL)
10607   NODE_NAME_CASE(SSUBSAT_VL)
10608   NODE_NAME_CASE(USUBSAT_VL)
10609   NODE_NAME_CASE(FADD_VL)
10610   NODE_NAME_CASE(FSUB_VL)
10611   NODE_NAME_CASE(FMUL_VL)
10612   NODE_NAME_CASE(FDIV_VL)
10613   NODE_NAME_CASE(FNEG_VL)
10614   NODE_NAME_CASE(FABS_VL)
10615   NODE_NAME_CASE(FSQRT_VL)
10616   NODE_NAME_CASE(FMA_VL)
10617   NODE_NAME_CASE(FCOPYSIGN_VL)
10618   NODE_NAME_CASE(SMIN_VL)
10619   NODE_NAME_CASE(SMAX_VL)
10620   NODE_NAME_CASE(UMIN_VL)
10621   NODE_NAME_CASE(UMAX_VL)
10622   NODE_NAME_CASE(FMINNUM_VL)
10623   NODE_NAME_CASE(FMAXNUM_VL)
10624   NODE_NAME_CASE(MULHS_VL)
10625   NODE_NAME_CASE(MULHU_VL)
10626   NODE_NAME_CASE(FP_TO_SINT_VL)
10627   NODE_NAME_CASE(FP_TO_UINT_VL)
10628   NODE_NAME_CASE(SINT_TO_FP_VL)
10629   NODE_NAME_CASE(UINT_TO_FP_VL)
10630   NODE_NAME_CASE(FP_EXTEND_VL)
10631   NODE_NAME_CASE(FP_ROUND_VL)
10632   NODE_NAME_CASE(VWMUL_VL)
10633   NODE_NAME_CASE(VWMULU_VL)
10634   NODE_NAME_CASE(VWMULSU_VL)
10635   NODE_NAME_CASE(VWADD_VL)
10636   NODE_NAME_CASE(VWADDU_VL)
10637   NODE_NAME_CASE(VWSUB_VL)
10638   NODE_NAME_CASE(VWSUBU_VL)
10639   NODE_NAME_CASE(VWADD_W_VL)
10640   NODE_NAME_CASE(VWADDU_W_VL)
10641   NODE_NAME_CASE(VWSUB_W_VL)
10642   NODE_NAME_CASE(VWSUBU_W_VL)
10643   NODE_NAME_CASE(SETCC_VL)
10644   NODE_NAME_CASE(VSELECT_VL)
10645   NODE_NAME_CASE(VP_MERGE_VL)
10646   NODE_NAME_CASE(VMAND_VL)
10647   NODE_NAME_CASE(VMOR_VL)
10648   NODE_NAME_CASE(VMXOR_VL)
10649   NODE_NAME_CASE(VMCLR_VL)
10650   NODE_NAME_CASE(VMSET_VL)
10651   NODE_NAME_CASE(VRGATHER_VX_VL)
10652   NODE_NAME_CASE(VRGATHER_VV_VL)
10653   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10654   NODE_NAME_CASE(VSEXT_VL)
10655   NODE_NAME_CASE(VZEXT_VL)
10656   NODE_NAME_CASE(VCPOP_VL)
10657   NODE_NAME_CASE(VLE_VL)
10658   NODE_NAME_CASE(VSE_VL)
10659   NODE_NAME_CASE(READ_CSR)
10660   NODE_NAME_CASE(WRITE_CSR)
10661   NODE_NAME_CASE(SWAP_CSR)
10662   }
10663   // clang-format on
10664   return nullptr;
10665 #undef NODE_NAME_CASE
10666 }
10667 
10668 /// getConstraintType - Given a constraint letter, return the type of
10669 /// constraint it is for this target.
10670 RISCVTargetLowering::ConstraintType
10671 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10672   if (Constraint.size() == 1) {
10673     switch (Constraint[0]) {
10674     default:
10675       break;
10676     case 'f':
10677       return C_RegisterClass;
10678     case 'I':
10679     case 'J':
10680     case 'K':
10681       return C_Immediate;
10682     case 'A':
10683       return C_Memory;
10684     case 'S': // A symbolic address
10685       return C_Other;
10686     }
10687   } else {
10688     if (Constraint == "vr" || Constraint == "vm")
10689       return C_RegisterClass;
10690   }
10691   return TargetLowering::getConstraintType(Constraint);
10692 }
10693 
10694 std::pair<unsigned, const TargetRegisterClass *>
10695 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10696                                                   StringRef Constraint,
10697                                                   MVT VT) const {
10698   // First, see if this is a constraint that directly corresponds to a
10699   // RISCV register class.
10700   if (Constraint.size() == 1) {
10701     switch (Constraint[0]) {
10702     case 'r':
10703       // TODO: Support fixed vectors up to XLen for P extension?
10704       if (VT.isVector())
10705         break;
10706       return std::make_pair(0U, &RISCV::GPRRegClass);
10707     case 'f':
10708       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10709         return std::make_pair(0U, &RISCV::FPR16RegClass);
10710       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10711         return std::make_pair(0U, &RISCV::FPR32RegClass);
10712       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10713         return std::make_pair(0U, &RISCV::FPR64RegClass);
10714       break;
10715     default:
10716       break;
10717     }
10718   } else if (Constraint == "vr") {
10719     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10720                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10721       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10722         return std::make_pair(0U, RC);
10723     }
10724   } else if (Constraint == "vm") {
10725     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10726       return std::make_pair(0U, &RISCV::VMV0RegClass);
10727   }
10728 
10729   // Clang will correctly decode the usage of register name aliases into their
10730   // official names. However, other frontends like `rustc` do not. This allows
10731   // users of these frontends to use the ABI names for registers in LLVM-style
10732   // register constraints.
10733   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10734                                .Case("{zero}", RISCV::X0)
10735                                .Case("{ra}", RISCV::X1)
10736                                .Case("{sp}", RISCV::X2)
10737                                .Case("{gp}", RISCV::X3)
10738                                .Case("{tp}", RISCV::X4)
10739                                .Case("{t0}", RISCV::X5)
10740                                .Case("{t1}", RISCV::X6)
10741                                .Case("{t2}", RISCV::X7)
10742                                .Cases("{s0}", "{fp}", RISCV::X8)
10743                                .Case("{s1}", RISCV::X9)
10744                                .Case("{a0}", RISCV::X10)
10745                                .Case("{a1}", RISCV::X11)
10746                                .Case("{a2}", RISCV::X12)
10747                                .Case("{a3}", RISCV::X13)
10748                                .Case("{a4}", RISCV::X14)
10749                                .Case("{a5}", RISCV::X15)
10750                                .Case("{a6}", RISCV::X16)
10751                                .Case("{a7}", RISCV::X17)
10752                                .Case("{s2}", RISCV::X18)
10753                                .Case("{s3}", RISCV::X19)
10754                                .Case("{s4}", RISCV::X20)
10755                                .Case("{s5}", RISCV::X21)
10756                                .Case("{s6}", RISCV::X22)
10757                                .Case("{s7}", RISCV::X23)
10758                                .Case("{s8}", RISCV::X24)
10759                                .Case("{s9}", RISCV::X25)
10760                                .Case("{s10}", RISCV::X26)
10761                                .Case("{s11}", RISCV::X27)
10762                                .Case("{t3}", RISCV::X28)
10763                                .Case("{t4}", RISCV::X29)
10764                                .Case("{t5}", RISCV::X30)
10765                                .Case("{t6}", RISCV::X31)
10766                                .Default(RISCV::NoRegister);
10767   if (XRegFromAlias != RISCV::NoRegister)
10768     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10769 
10770   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10771   // TableGen record rather than the AsmName to choose registers for InlineAsm
10772   // constraints, plus we want to match those names to the widest floating point
10773   // register type available, manually select floating point registers here.
10774   //
10775   // The second case is the ABI name of the register, so that frontends can also
10776   // use the ABI names in register constraint lists.
10777   if (Subtarget.hasStdExtF()) {
10778     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10779                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10780                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10781                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10782                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10783                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10784                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10785                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10786                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10787                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10788                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10789                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10790                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10791                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10792                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10793                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10794                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10795                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10796                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10797                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10798                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10799                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10800                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10801                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10802                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10803                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10804                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10805                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10806                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10807                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10808                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10809                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10810                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10811                         .Default(RISCV::NoRegister);
10812     if (FReg != RISCV::NoRegister) {
10813       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10814       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10815         unsigned RegNo = FReg - RISCV::F0_F;
10816         unsigned DReg = RISCV::F0_D + RegNo;
10817         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10818       }
10819       if (VT == MVT::f32 || VT == MVT::Other)
10820         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10821       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10822         unsigned RegNo = FReg - RISCV::F0_F;
10823         unsigned HReg = RISCV::F0_H + RegNo;
10824         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10825       }
10826     }
10827   }
10828 
10829   if (Subtarget.hasVInstructions()) {
10830     Register VReg = StringSwitch<Register>(Constraint.lower())
10831                         .Case("{v0}", RISCV::V0)
10832                         .Case("{v1}", RISCV::V1)
10833                         .Case("{v2}", RISCV::V2)
10834                         .Case("{v3}", RISCV::V3)
10835                         .Case("{v4}", RISCV::V4)
10836                         .Case("{v5}", RISCV::V5)
10837                         .Case("{v6}", RISCV::V6)
10838                         .Case("{v7}", RISCV::V7)
10839                         .Case("{v8}", RISCV::V8)
10840                         .Case("{v9}", RISCV::V9)
10841                         .Case("{v10}", RISCV::V10)
10842                         .Case("{v11}", RISCV::V11)
10843                         .Case("{v12}", RISCV::V12)
10844                         .Case("{v13}", RISCV::V13)
10845                         .Case("{v14}", RISCV::V14)
10846                         .Case("{v15}", RISCV::V15)
10847                         .Case("{v16}", RISCV::V16)
10848                         .Case("{v17}", RISCV::V17)
10849                         .Case("{v18}", RISCV::V18)
10850                         .Case("{v19}", RISCV::V19)
10851                         .Case("{v20}", RISCV::V20)
10852                         .Case("{v21}", RISCV::V21)
10853                         .Case("{v22}", RISCV::V22)
10854                         .Case("{v23}", RISCV::V23)
10855                         .Case("{v24}", RISCV::V24)
10856                         .Case("{v25}", RISCV::V25)
10857                         .Case("{v26}", RISCV::V26)
10858                         .Case("{v27}", RISCV::V27)
10859                         .Case("{v28}", RISCV::V28)
10860                         .Case("{v29}", RISCV::V29)
10861                         .Case("{v30}", RISCV::V30)
10862                         .Case("{v31}", RISCV::V31)
10863                         .Default(RISCV::NoRegister);
10864     if (VReg != RISCV::NoRegister) {
10865       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10866         return std::make_pair(VReg, &RISCV::VMRegClass);
10867       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10868         return std::make_pair(VReg, &RISCV::VRRegClass);
10869       for (const auto *RC :
10870            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10871         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10872           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10873           return std::make_pair(VReg, RC);
10874         }
10875       }
10876     }
10877   }
10878 
10879   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10880 }
10881 
10882 unsigned
10883 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10884   // Currently only support length 1 constraints.
10885   if (ConstraintCode.size() == 1) {
10886     switch (ConstraintCode[0]) {
10887     case 'A':
10888       return InlineAsm::Constraint_A;
10889     default:
10890       break;
10891     }
10892   }
10893 
10894   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10895 }
10896 
10897 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10898     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10899     SelectionDAG &DAG) const {
10900   // Currently only support length 1 constraints.
10901   if (Constraint.length() == 1) {
10902     switch (Constraint[0]) {
10903     case 'I':
10904       // Validate & create a 12-bit signed immediate operand.
10905       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10906         uint64_t CVal = C->getSExtValue();
10907         if (isInt<12>(CVal))
10908           Ops.push_back(
10909               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10910       }
10911       return;
10912     case 'J':
10913       // Validate & create an integer zero operand.
10914       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10915         if (C->getZExtValue() == 0)
10916           Ops.push_back(
10917               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10918       return;
10919     case 'K':
10920       // Validate & create a 5-bit unsigned immediate operand.
10921       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10922         uint64_t CVal = C->getZExtValue();
10923         if (isUInt<5>(CVal))
10924           Ops.push_back(
10925               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10926       }
10927       return;
10928     case 'S':
10929       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10930         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10931                                                  GA->getValueType(0)));
10932       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10933         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10934                                                 BA->getValueType(0)));
10935       }
10936       return;
10937     default:
10938       break;
10939     }
10940   }
10941   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10942 }
10943 
10944 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10945                                                    Instruction *Inst,
10946                                                    AtomicOrdering Ord) const {
10947   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10948     return Builder.CreateFence(Ord);
10949   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10950     return Builder.CreateFence(AtomicOrdering::Release);
10951   return nullptr;
10952 }
10953 
10954 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10955                                                     Instruction *Inst,
10956                                                     AtomicOrdering Ord) const {
10957   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10958     return Builder.CreateFence(AtomicOrdering::Acquire);
10959   return nullptr;
10960 }
10961 
10962 TargetLowering::AtomicExpansionKind
10963 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10964   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
10965   // point operations can't be used in an lr/sc sequence without breaking the
10966   // forward-progress guarantee.
10967   if (AI->isFloatingPointOperation())
10968     return AtomicExpansionKind::CmpXChg;
10969 
10970   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
10971   if (Size == 8 || Size == 16)
10972     return AtomicExpansionKind::MaskedIntrinsic;
10973   return AtomicExpansionKind::None;
10974 }
10975 
10976 static Intrinsic::ID
10977 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
10978   if (XLen == 32) {
10979     switch (BinOp) {
10980     default:
10981       llvm_unreachable("Unexpected AtomicRMW BinOp");
10982     case AtomicRMWInst::Xchg:
10983       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
10984     case AtomicRMWInst::Add:
10985       return Intrinsic::riscv_masked_atomicrmw_add_i32;
10986     case AtomicRMWInst::Sub:
10987       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
10988     case AtomicRMWInst::Nand:
10989       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
10990     case AtomicRMWInst::Max:
10991       return Intrinsic::riscv_masked_atomicrmw_max_i32;
10992     case AtomicRMWInst::Min:
10993       return Intrinsic::riscv_masked_atomicrmw_min_i32;
10994     case AtomicRMWInst::UMax:
10995       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
10996     case AtomicRMWInst::UMin:
10997       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
10998     }
10999   }
11000 
11001   if (XLen == 64) {
11002     switch (BinOp) {
11003     default:
11004       llvm_unreachable("Unexpected AtomicRMW BinOp");
11005     case AtomicRMWInst::Xchg:
11006       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11007     case AtomicRMWInst::Add:
11008       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11009     case AtomicRMWInst::Sub:
11010       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11011     case AtomicRMWInst::Nand:
11012       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11013     case AtomicRMWInst::Max:
11014       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11015     case AtomicRMWInst::Min:
11016       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11017     case AtomicRMWInst::UMax:
11018       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11019     case AtomicRMWInst::UMin:
11020       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11021     }
11022   }
11023 
11024   llvm_unreachable("Unexpected XLen\n");
11025 }
11026 
11027 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11028     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11029     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11030   unsigned XLen = Subtarget.getXLen();
11031   Value *Ordering =
11032       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11033   Type *Tys[] = {AlignedAddr->getType()};
11034   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11035       AI->getModule(),
11036       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11037 
11038   if (XLen == 64) {
11039     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11040     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11041     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11042   }
11043 
11044   Value *Result;
11045 
11046   // Must pass the shift amount needed to sign extend the loaded value prior
11047   // to performing a signed comparison for min/max. ShiftAmt is the number of
11048   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11049   // is the number of bits to left+right shift the value in order to
11050   // sign-extend.
11051   if (AI->getOperation() == AtomicRMWInst::Min ||
11052       AI->getOperation() == AtomicRMWInst::Max) {
11053     const DataLayout &DL = AI->getModule()->getDataLayout();
11054     unsigned ValWidth =
11055         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11056     Value *SextShamt =
11057         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11058     Result = Builder.CreateCall(LrwOpScwLoop,
11059                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11060   } else {
11061     Result =
11062         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11063   }
11064 
11065   if (XLen == 64)
11066     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11067   return Result;
11068 }
11069 
11070 TargetLowering::AtomicExpansionKind
11071 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11072     AtomicCmpXchgInst *CI) const {
11073   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11074   if (Size == 8 || Size == 16)
11075     return AtomicExpansionKind::MaskedIntrinsic;
11076   return AtomicExpansionKind::None;
11077 }
11078 
11079 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11080     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11081     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11082   unsigned XLen = Subtarget.getXLen();
11083   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11084   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11085   if (XLen == 64) {
11086     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11087     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11088     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11089     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11090   }
11091   Type *Tys[] = {AlignedAddr->getType()};
11092   Function *MaskedCmpXchg =
11093       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11094   Value *Result = Builder.CreateCall(
11095       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11096   if (XLen == 64)
11097     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11098   return Result;
11099 }
11100 
11101 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11102   return false;
11103 }
11104 
11105 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11106                                                EVT VT) const {
11107   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11108     return false;
11109 
11110   switch (FPVT.getSimpleVT().SimpleTy) {
11111   case MVT::f16:
11112     return Subtarget.hasStdExtZfh();
11113   case MVT::f32:
11114     return Subtarget.hasStdExtF();
11115   case MVT::f64:
11116     return Subtarget.hasStdExtD();
11117   default:
11118     return false;
11119   }
11120 }
11121 
11122 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11123   // If we are using the small code model, we can reduce size of jump table
11124   // entry to 4 bytes.
11125   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11126       getTargetMachine().getCodeModel() == CodeModel::Small) {
11127     return MachineJumpTableInfo::EK_Custom32;
11128   }
11129   return TargetLowering::getJumpTableEncoding();
11130 }
11131 
11132 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11133     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11134     unsigned uid, MCContext &Ctx) const {
11135   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11136          getTargetMachine().getCodeModel() == CodeModel::Small);
11137   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11138 }
11139 
11140 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11141                                                      EVT VT) const {
11142   VT = VT.getScalarType();
11143 
11144   if (!VT.isSimple())
11145     return false;
11146 
11147   switch (VT.getSimpleVT().SimpleTy) {
11148   case MVT::f16:
11149     return Subtarget.hasStdExtZfh();
11150   case MVT::f32:
11151     return Subtarget.hasStdExtF();
11152   case MVT::f64:
11153     return Subtarget.hasStdExtD();
11154   default:
11155     break;
11156   }
11157 
11158   return false;
11159 }
11160 
11161 Register RISCVTargetLowering::getExceptionPointerRegister(
11162     const Constant *PersonalityFn) const {
11163   return RISCV::X10;
11164 }
11165 
11166 Register RISCVTargetLowering::getExceptionSelectorRegister(
11167     const Constant *PersonalityFn) const {
11168   return RISCV::X11;
11169 }
11170 
11171 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11172   // Return false to suppress the unnecessary extensions if the LibCall
11173   // arguments or return value is f32 type for LP64 ABI.
11174   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11175   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11176     return false;
11177 
11178   return true;
11179 }
11180 
11181 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11182   if (Subtarget.is64Bit() && Type == MVT::i32)
11183     return true;
11184 
11185   return IsSigned;
11186 }
11187 
11188 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11189                                                  SDValue C) const {
11190   // Check integral scalar types.
11191   if (VT.isScalarInteger()) {
11192     // Omit the optimization if the sub target has the M extension and the data
11193     // size exceeds XLen.
11194     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11195       return false;
11196     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11197       // Break the MUL to a SLLI and an ADD/SUB.
11198       const APInt &Imm = ConstNode->getAPIntValue();
11199       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11200           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11201         return true;
11202       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11203       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11204           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11205            (Imm - 8).isPowerOf2()))
11206         return true;
11207       // Omit the following optimization if the sub target has the M extension
11208       // and the data size >= XLen.
11209       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11210         return false;
11211       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11212       // a pair of LUI/ADDI.
11213       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11214         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11215         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11216             (1 - ImmS).isPowerOf2())
11217         return true;
11218       }
11219     }
11220   }
11221 
11222   return false;
11223 }
11224 
11225 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11226                                                       SDValue ConstNode) const {
11227   // Let the DAGCombiner decide for vectors.
11228   EVT VT = AddNode.getValueType();
11229   if (VT.isVector())
11230     return true;
11231 
11232   // Let the DAGCombiner decide for larger types.
11233   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11234     return true;
11235 
11236   // It is worse if c1 is simm12 while c1*c2 is not.
11237   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11238   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11239   const APInt &C1 = C1Node->getAPIntValue();
11240   const APInt &C2 = C2Node->getAPIntValue();
11241   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11242     return false;
11243 
11244   // Default to true and let the DAGCombiner decide.
11245   return true;
11246 }
11247 
11248 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11249     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11250     bool *Fast) const {
11251   if (!VT.isVector())
11252     return false;
11253 
11254   EVT ElemVT = VT.getVectorElementType();
11255   if (Alignment >= ElemVT.getStoreSize()) {
11256     if (Fast)
11257       *Fast = true;
11258     return true;
11259   }
11260 
11261   return false;
11262 }
11263 
11264 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11265     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11266     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11267   bool IsABIRegCopy = CC.hasValue();
11268   EVT ValueVT = Val.getValueType();
11269   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11270     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11271     // and cast to f32.
11272     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11273     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11274     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11275                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11276     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11277     Parts[0] = Val;
11278     return true;
11279   }
11280 
11281   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11282     LLVMContext &Context = *DAG.getContext();
11283     EVT ValueEltVT = ValueVT.getVectorElementType();
11284     EVT PartEltVT = PartVT.getVectorElementType();
11285     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11286     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11287     if (PartVTBitSize % ValueVTBitSize == 0) {
11288       assert(PartVTBitSize >= ValueVTBitSize);
11289       // If the element types are different, bitcast to the same element type of
11290       // PartVT first.
11291       // Give an example here, we want copy a <vscale x 1 x i8> value to
11292       // <vscale x 4 x i16>.
11293       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11294       // subvector, then we can bitcast to <vscale x 4 x i16>.
11295       if (ValueEltVT != PartEltVT) {
11296         if (PartVTBitSize > ValueVTBitSize) {
11297           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11298           assert(Count != 0 && "The number of element should not be zero.");
11299           EVT SameEltTypeVT =
11300               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11301           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11302                             DAG.getUNDEF(SameEltTypeVT), Val,
11303                             DAG.getVectorIdxConstant(0, DL));
11304         }
11305         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11306       } else {
11307         Val =
11308             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11309                         Val, DAG.getVectorIdxConstant(0, DL));
11310       }
11311       Parts[0] = Val;
11312       return true;
11313     }
11314   }
11315   return false;
11316 }
11317 
11318 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11319     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11320     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11321   bool IsABIRegCopy = CC.hasValue();
11322   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11323     SDValue Val = Parts[0];
11324 
11325     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11326     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11327     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11328     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11329     return Val;
11330   }
11331 
11332   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11333     LLVMContext &Context = *DAG.getContext();
11334     SDValue Val = Parts[0];
11335     EVT ValueEltVT = ValueVT.getVectorElementType();
11336     EVT PartEltVT = PartVT.getVectorElementType();
11337     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11338     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11339     if (PartVTBitSize % ValueVTBitSize == 0) {
11340       assert(PartVTBitSize >= ValueVTBitSize);
11341       EVT SameEltTypeVT = ValueVT;
11342       // If the element types are different, convert it to the same element type
11343       // of PartVT.
11344       // Give an example here, we want copy a <vscale x 1 x i8> value from
11345       // <vscale x 4 x i16>.
11346       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11347       // then we can extract <vscale x 1 x i8>.
11348       if (ValueEltVT != PartEltVT) {
11349         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11350         assert(Count != 0 && "The number of element should not be zero.");
11351         SameEltTypeVT =
11352             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11353         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11354       }
11355       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11356                         DAG.getVectorIdxConstant(0, DL));
11357       return Val;
11358     }
11359   }
11360   return SDValue();
11361 }
11362 
11363 SDValue
11364 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11365                                    SelectionDAG &DAG,
11366                                    SmallVectorImpl<SDNode *> &Created) const {
11367   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11368   if (isIntDivCheap(N->getValueType(0), Attr))
11369     return SDValue(N, 0); // Lower SDIV as SDIV
11370 
11371   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11372          "Unexpected divisor!");
11373 
11374   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11375   if (!Subtarget.hasStdExtZbt())
11376     return SDValue();
11377 
11378   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11379   // Besides, more critical path instructions will be generated when dividing
11380   // by 2. So we keep using the original DAGs for these cases.
11381   unsigned Lg2 = Divisor.countTrailingZeros();
11382   if (Lg2 == 1 || Lg2 >= 12)
11383     return SDValue();
11384 
11385   // fold (sdiv X, pow2)
11386   EVT VT = N->getValueType(0);
11387   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11388     return SDValue();
11389 
11390   SDLoc DL(N);
11391   SDValue N0 = N->getOperand(0);
11392   SDValue Zero = DAG.getConstant(0, DL, VT);
11393   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11394 
11395   // Add (N0 < 0) ? Pow2 - 1 : 0;
11396   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11397   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11398   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11399 
11400   Created.push_back(Cmp.getNode());
11401   Created.push_back(Add.getNode());
11402   Created.push_back(Sel.getNode());
11403 
11404   // Divide by pow2.
11405   SDValue SRA =
11406       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11407 
11408   // If we're dividing by a positive value, we're done.  Otherwise, we must
11409   // negate the result.
11410   if (Divisor.isNonNegative())
11411     return SRA;
11412 
11413   Created.push_back(SRA.getNode());
11414   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11415 }
11416 
11417 #define GET_REGISTER_MATCHER
11418 #include "RISCVGenAsmMatcher.inc"
11419 
11420 Register
11421 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11422                                        const MachineFunction &MF) const {
11423   Register Reg = MatchRegisterAltName(RegName);
11424   if (Reg == RISCV::NoRegister)
11425     Reg = MatchRegisterName(RegName);
11426   if (Reg == RISCV::NoRegister)
11427     report_fatal_error(
11428         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11429   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11430   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11431     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11432                              StringRef(RegName) + "\"."));
11433   return Reg;
11434 }
11435 
11436 namespace llvm {
11437 namespace RISCVVIntrinsicsTable {
11438 
11439 #define GET_RISCVVIntrinsicsTable_IMPL
11440 #include "RISCVGenSearchableTables.inc"
11441 
11442 } // namespace RISCVVIntrinsicsTable
11443 
11444 } // namespace llvm
11445