//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }
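  // For example (illustrative): requesting the ilp32f ABI on a target built
  // without the F extension falls back to the soft-float ilp32 ABI above.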

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasVInstructions()) {
    auto addRegClassForRVV = [this](MVT VT) {
      // Disable the smallest fractional LMUL types if ELEN is less than
      // RVVBitsPerBlock.
      unsigned MinElts = RISCV::RVVBitsPerBlock / Subtarget.getELEN();
      if (VT.getVectorMinNumElements() < MinElts)
        return;

      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      const TargetRegisterClass *RC;
      if (Size <= RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRRegClass;
      else if (Size == 2 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 4 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM4RegClass;
      else if (Size == 8 * RISCV::RVVBitsPerBlock)
        RC = &RISCV::VRM8RegClass;
      else
        llvm_unreachable("Unexpected size");

      addRegisterClass(VT, RC);
    };
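
    // Illustrative examples of the mapping above, assuming RVVBitsPerBlock is
    // 64: nxv8i8 has a known minimum size of 64 bits and maps to VR, nxv16i8
    // (128 bits) to VRM2, and nxv64i8 (512 bits) to VRM8. With an ELEN of 32,
    // MinElts is 64/32 = 2, so the fractional type nxv1i8 gets no register
    // class.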

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;
      addRegClassForRVV(VT);
    }

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF32())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF64())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
                   MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);

  if (!Subtarget.hasStdExtZbb())
    setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

    setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
                       MVT::i32, Custom);

    setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
                       MVT::i32, Custom);
  } else {
    setLibcallName(
        {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
        nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
                        ISD::SREM, ISD::UREM},
                       XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);

      setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
                         {MVT::i8, MVT::i16, MVT::i32}, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(
      {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
      Expand);

  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
                     Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
      Subtarget.hasStdExtZbkb()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
  } else {
    setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);

    // BSWAP i8 doesn't exist.
    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);

    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
                           ? Legal
                           : Expand);
    // Zbkb can use rev8+brev8 to implement bitreverse.
    setOperationAction(ISD::BITREVERSE, XLenVT,
                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
  }
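
  // As a sketch of the Zbkb lowering above: rev8 reverses the bytes of a
  // register and brev8 reverses the bits within each byte, so
  // bitreverse(x) == brev8(rev8(x)) (the two instructions commute).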

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
                       Legal);

    if (Subtarget.is64Bit())
      setOperationAction(
          {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
          MVT::i32, Custom);
  } else {
    setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);

    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  static const unsigned FPLegalNodeTypes[] = {
      ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
      ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
      ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
      ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
      ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
      ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};

  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  static const unsigned FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(FPLegalNodeTypes, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);

    setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
                        ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
                        ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
                        ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                        ISD::FLOG2, ISD::FLOG10},
                       MVT::f16, Promote);

    // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
    // complete support for all operations in LegalizeDAG.

    // We need to custom promote this.
    if (Subtarget.is64Bit())
      setOperationAction(ISD::FPOWI, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(FPLegalNodeTypes, MVT::f32, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    setOperationAction(FPOpToExpand, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
    setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    setOperationAction(FPOpToExpand, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit())
    setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
                        ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
                       MVT::i32, Custom);

  if (Subtarget.hasStdExtF()) {
    setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
                       Custom);

    setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
                        ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
                       XLenVT, Legal);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
                      ISD::JumpTable},
                     XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  if (Subtarget.is64Bit())
    setOperationAction(ISD::Constant, MVT::i64, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasVInstructions()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                       {MVT::i8, MVT::i16}, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    else
      setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                         MVT::i64, Custom);

    setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                       MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
        ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
        ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
        ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
        ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
        ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
        ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
        ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD,        ISD::VP_FSUB,
        ISD::VP_FMUL,        ISD::VP_FDIV,
        ISD::VP_FNEG,        ISD::VP_FMA,
        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
        ISD::VP_MERGE,       ISD::VP_SELECT,
        ISD::VP_SITOFP,      ISD::VP_UITOFP,
        ISD::VP_SETCC,       ISD::VP_FP_ROUND,
        ISD::VP_FP_EXTEND};

    static const unsigned IntegerVecReduceOps[] = {
        ISD::VECREDUCE_ADD,  ISD::VECREDUCE_AND,  ISD::VECREDUCE_OR,
        ISD::VECREDUCE_XOR,  ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
        ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN};

    static const unsigned FloatingPointVecReduceOps[] = {
        ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_FMIN,
        ISD::VECREDUCE_FMAX};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                         MVT::i64, Custom);

      setOperationAction(IntegerVecReduceOps, MVT::i64, Custom);

      setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
                          ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
                          ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                          ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
                         MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      if (!isTypeLegal(VT))
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes.
      setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
                          ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
                         VT, Custom);

      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(
          {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
          Expand);

      setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);

      setOperationAction(
          {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
          Custom);

      setOperationAction(
          {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
          Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      setOperationAction(
          {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
          Custom);
      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      setOperationPromotedToType(
          ISD::VECTOR_SPLICE, VT,
          MVT::getVectorVT(MVT::i8, VT.getVectorElementCount()));
    }

    for (MVT VT : IntVecVTs) {
      if (!isTypeLegal(VT))
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      // Vectors implement MULHS/MULHU.
      setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);

      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
        setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);

      setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                         Legal);

      setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);

      setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
                         Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
                         VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);
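
      // For example (illustrative), nxv2i8 -> nxv2f64 is more than one
      // power-of-two apart in element size, so it is lowered in stages:
      // first extend nxv2i8 to nxv2i32, then use the native
      // nxv2i32 -> nxv2f64 conversion.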

      setOperationAction(
          {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);
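      // E.g. truncating nxv2i64 to nxv2i8 emits a chain of three such nodes,
      // narrowing i64 -> i32 -> i16 -> i8 one power of two at a time.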

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(IntegerVecReduceOps, VT, Custom);

      setOperationAction(IntegerVPOps, VT, Custom);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      // Splice
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
      // type that can represent the value exactly.
      if (VT.getVectorElementType() != MVT::i64) {
        MVT FloatEltVT =
            VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
        EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
        if (isTypeLegal(FloatVT)) {
          setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                             Custom);
        }
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };
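
    // For example, (setcc a, b, SETOGT) is expanded to (setcc b, a, SETOLT),
    // which maps onto vmflt; isel patterns then also match the swapped form,
    // so a splatted scalar operand on either side can still select the "vf"
    // variant.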

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);
      // Expand various condition codes (explained above).
      setCondCodeAction(VFPCCToExpand, VT, Expand);

      setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);

      setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                         VT, Custom);

      setOperationAction(FloatingPointVecReduceOps, VT, Custom);

      // Expand FP operations that need libcalls.
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);

      setOperationAction(FloatingPointVPOps, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasVInstructionsF16()) {
      for (MVT VT : F16VecVTs) {
        if (!isTypeLegal(VT))
          continue;
        SetCommonVFPActions(VT);
      }
    }

    if (Subtarget.hasVInstructionsF32()) {
      for (MVT VT : F32VecVTs) {
        if (!isTypeLegal(VT))
          continue;
        SetCommonVFPActions(VT);
        SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      }
    }

    if (Subtarget.hasVInstructionsF64()) {
      for (MVT VT : F64VecVTs) {
        if (!isTypeLegal(VT))
          continue;
        SetCommonVFPActions(VT);
        SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
        SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
      }
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
                           OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
                           Custom);

        setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(
            {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
            Custom);

        setOperationAction(
            {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
            Custom);

        setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
                            ISD::FP_TO_UINT},
                           VT, Custom);

        // Operations below are different between masks and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
                              ISD::OR, ISD::XOR},
                             VT, Custom);

          setOperationAction(
              {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
              VT, Custom);
          continue;
        }

        // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
        // it before type legalization for i64 vectors on RV32. It will then be
        // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(
            {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
                            ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
                            ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
                           VT, Custom);

        setOperationAction(
            {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);

        // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
        if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
          setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);

        setOperationAction(
            {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
            Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(
            {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
                            ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
                            ISD::VECREDUCE_UMIN},
                           VT, Custom);

        setOperationAction(IntegerVPOps, VT, Custom);

        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
        // type that can represent the value exactly.
        if (VT.getVectorElementType() != MVT::i64) {
          MVT FloatEltVT =
              VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
          EVT FloatVT =
              MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
          if (isTypeLegal(FloatVT))
            setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                               Custom);
        }
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
                            ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
                            ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
                            ISD::MGATHER, ISD::MSCATTER},
                           VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                            ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
                           VT, Custom);

        setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);

        setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                           VT, Custom);

        setCondCodeAction(VFPCCToExpand, VT, Expand);

        setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(FloatingPointVecReduceOps, VT, Custom);

        setOperationAction(FloatingPointVPOps, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
                         Custom);
      if (Subtarget.hasStdExtZfh())
        setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      if (Subtarget.hasStdExtF())
        setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      if (Subtarget.hasStdExtD())
        setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
                       ISD::OR, ISD::XOR});
  if (Subtarget.is64Bit())
    setTargetDAGCombine(ISD::SRA);

  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});

  if (Subtarget.hasStdExtZbp())
    setTargetDAGCombine({ISD::ROTL, ISD::ROTR});

  if (Subtarget.hasStdExtZbb())
    setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});

  if (Subtarget.hasStdExtZbkb())
    setTargetDAGCombine(ISD::BITREVERSE);
  if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
    setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
                         ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
  if (Subtarget.hasVInstructions())
    setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
                         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                         ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
  if (Subtarget.useRVVForFixedLengthVectors())
    setTargetDAGCombine(ISD::BITCAST);

  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasVInstructions() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::riscv_masked_strided_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT = getValueType(DL, I.getType()->getScalarType());
    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::riscv_masked_strided_store:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT =
        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
    Info.align = Align(
        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
        8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOStore;
    return true;
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT =
        getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
    Info.align =
        Align(DL.getTypeSizeInBits(
                  I.getType()->getStructElementType(0)->getScalarType()) /
              8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // RVV instructions only support register addressing.
  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
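// For example (illustrative IR), "%lo = trunc i64 %x to i32" on RV32 needs no
// instructions: %lo simply reuses the register already holding the low half
// of %x.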
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  // Don't advertise i32->i64 zextload as being free for RV64. It interacts
  // poorly with type legalization of compares preferring sext.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
  return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  // FIXME: Support vectors once we have tests.
  if (VT.isVector())
    return false;

  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
          Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);
}

bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
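  // The bound of 10 keeps the mask (1 << Y) at most 1024, which still fits in
  // ANDI's 12-bit signed immediate (maximum 2047); e.g. testing bit 7 becomes
  // ANDI with 128 followed by SNEZ.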
  auto *C = dyn_cast<ConstantSDNode>(Y);
  return C && C->getAPIntValue().ule(10);
}

bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                            Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getIntegerBitWidth();
  if (BitSize > Subtarget.getXLen())
    return false;

  // Fast path, assume 32-bit immediates are cheap.
  int64_t Val = Imm.getSExtValue();
  if (isInt<32>(Val))
    return true;

  // A constant pool entry may be more aligned than the load we're trying to
  // replace. If we don't support unaligned scalar mem, prefer the constant
  // pool.
  // TODO: Can the caller pass down the alignment?
  if (!Subtarget.enableUnalignedScalarMem())
    return true;

  // Prefer to keep the load if it would require many instructions.
  // This uses the same threshold we use for constant pools but doesn't
  // check useConstantPoolForLargeInts.
  // TODO: Should we keep the load only when we're definitely going to emit a
  // constant pool?

  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Val, Subtarget.getFeatureBits());
  return Seq.size() <= Subtarget.getMaxBuildIntsCost();
}

bool RISCVTargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {
  // One interesting pattern that we'd want to form is 'bit extract':
  //   ((1 >> Y) & 1) ==/!= 0
  // But we also need to be careful not to try to reverse that fold.

  // Is this '((1 >> Y) & 1)'?
  if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
    return false; // Keep the 'bit extract' pattern.

  // Will this be '((1 >> Y) & 1)' after the transform?
  if (NewShiftOpcode == ISD::SRL && CC->isOne())
    return true; // Do form the 'bit extract' pattern.

  // If 'X' is a constant, and we transform, then we will immediately
  // try to undo the fold, thus causing endless combine loop.
  // So only do the transform if X is not a constant. This matches the default
  // implementation of this function.
  return !XC;
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
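///
/// As an illustrative IR example, the splat feeding the add below can be sunk
/// to the add's block so that isel can fold it into a vadd.vx:
///   %head = insertelement <vscale x 4 x i32> undef, i32 %x, i32 0
///   %splat = shufflevector <vscale x 4 x i32> %head,
///                          <vscale x 4 x i32> undef,
///                          <vscale x 4 x i32> zeroinitializer
///   %sum = add <vscale x 4 x i32> %v, %splat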
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
    return false;

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
        case Intrinsic::vp_fma:
          return Operand == 0 || Operand == 1;
        // FIXME: Our patterns can only match vx/vf instructions when the splat
        // is on the RHS, because TableGen doesn't recognize our VP operations
        // as commutative.
1252         case Intrinsic::vp_add:
1253         case Intrinsic::vp_mul:
1254         case Intrinsic::vp_and:
1255         case Intrinsic::vp_or:
1256         case Intrinsic::vp_xor:
1257         case Intrinsic::vp_fadd:
1258         case Intrinsic::vp_fmul:
1259         case Intrinsic::vp_shl:
1260         case Intrinsic::vp_lshr:
1261         case Intrinsic::vp_ashr:
1262         case Intrinsic::vp_udiv:
1263         case Intrinsic::vp_sdiv:
1264         case Intrinsic::vp_urem:
1265         case Intrinsic::vp_srem:
1266           return Operand == 1;
1267         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1268         // explicit patterns for both LHS and RHS (as 'vr' versions).
1269         case Intrinsic::vp_sub:
1270         case Intrinsic::vp_fsub:
1271         case Intrinsic::vp_fdiv:
1272           return Operand == 0 || Operand == 1;
1273         default:
1274           return false;
1275         }
1276       }
1277       return false;
1278     default:
1279       return false;
1280     }
1281   };
1282 
1283   for (auto OpIdx : enumerate(I->operands())) {
1284     if (!IsSinker(I, OpIdx.index()))
1285       continue;
1286 
1287     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1288     // Make sure we are not already sinking this operand
1289     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1290       continue;
1291 
1292     // We are looking for a splat that can be sunk.
1293     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1294                              m_Undef(), m_ZeroMask())))
1295       continue;
1296 
1297     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1298     // and vector registers
1299     for (Use &U : Op->uses()) {
1300       Instruction *Insn = cast<Instruction>(U.getUser());
1301       if (!IsSinker(Insn, U.getOperandNo()))
1302         return false;
1303     }
1304 
1305     Ops.push_back(&Op->getOperandUse(0));
1306     Ops.push_back(&OpIdx.value());
1307   }
1308   return true;
1309 }
1310 
1311 bool RISCVTargetLowering::isOffsetFoldingLegal(
1312     const GlobalAddressSDNode *GA) const {
1313   // In order to maximise the opportunity for common subexpression elimination,
1314   // keep a separate ADD node for the global address offset instead of folding
1315   // it in the global address node. Later peephole optimisations may choose to
1316   // fold it back in when profitable.
1317   return false;
1318 }
1319 
1320 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1321                                        bool ForCodeSize) const {
1322   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1323   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1324     return false;
1325   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1326     return false;
1327   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1328     return false;
1329   return Imm.isZero();
1330 }
1331 
1332 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1333   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1334          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1335          (VT == MVT::f64 && Subtarget.hasStdExtD());
1336 }
1337 
MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
1341   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1342   // We might still end up using a GPR but that will be decided based on ABI.
1343   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1344   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1345     return MVT::f32;
1346 
1347   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1348 }
1349 
unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
1353   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1354   // We might still end up using a GPR but that will be decided based on ABI.
1355   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1356   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1357     return 1;
1358 
1359   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1360 }
1361 
1362 // Changes the condition code and swaps operands if necessary, so the SetCC
1363 // operation matches one of the comparisons supported directly by branches
1364 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1365 // with 1/-1.
1366 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1367                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1368   // Convert X > -1 to X >= 0.
1369   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1370     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1371     CC = ISD::SETGE;
1372     return;
1373   }
1374   // Convert X < 1 to 0 >= X.
1375   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1376     RHS = LHS;
1377     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1378     CC = ISD::SETGE;
1379     return;
1380   }
1381 
1382   switch (CC) {
1383   default:
1384     break;
1385   case ISD::SETGT:
1386   case ISD::SETLE:
1387   case ISD::SETUGT:
1388   case ISD::SETULE:
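    // None of these conditions maps directly to a RISC-V branch (only eq/ne,
    // lt/ge and their unsigned forms exist), so use the mirrored condition
    // with swapped operands, e.g. (a > b) becomes (b < a).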
1389     CC = ISD::getSetCCSwappedOperands(CC);
1390     std::swap(LHS, RHS);
1391     break;
1392   }
1393 }
1394 
1395 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1396   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1397   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
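  // Mask vectors (i1 elements) occupy one bit per element; scaling by 8 maps
  // them to the LMUL of the equivalent i8 vector, e.g. nxv8i1 is treated like
  // nxv8i8 (LMUL_1).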
1398   if (VT.getVectorElementType() == MVT::i1)
1399     KnownSize *= 8;
1400 
1401   switch (KnownSize) {
1402   default:
1403     llvm_unreachable("Invalid LMUL.");
1404   case 8:
1405     return RISCVII::VLMUL::LMUL_F8;
1406   case 16:
1407     return RISCVII::VLMUL::LMUL_F4;
1408   case 32:
1409     return RISCVII::VLMUL::LMUL_F2;
1410   case 64:
1411     return RISCVII::VLMUL::LMUL_1;
1412   case 128:
1413     return RISCVII::VLMUL::LMUL_2;
1414   case 256:
1415     return RISCVII::VLMUL::LMUL_4;
1416   case 512:
1417     return RISCVII::VLMUL::LMUL_8;
1418   }
1419 }
1420 
1421 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1422   switch (LMul) {
1423   default:
1424     llvm_unreachable("Invalid LMUL.");
1425   case RISCVII::VLMUL::LMUL_F8:
1426   case RISCVII::VLMUL::LMUL_F4:
1427   case RISCVII::VLMUL::LMUL_F2:
1428   case RISCVII::VLMUL::LMUL_1:
1429     return RISCV::VRRegClassID;
1430   case RISCVII::VLMUL::LMUL_2:
1431     return RISCV::VRM2RegClassID;
1432   case RISCVII::VLMUL::LMUL_4:
1433     return RISCV::VRM4RegClassID;
1434   case RISCVII::VLMUL::LMUL_8:
1435     return RISCV::VRM8RegClassID;
1436   }
1437 }
1438 
1439 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
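  // The LMUL of VT selects the subregister family (vrm1/vrm2/vrm4 slices);
  // Index then picks which slice of the enclosing register group to address.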
1440   RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1) {
1445     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1446                   "Unexpected subreg numbering");
1447     return RISCV::sub_vrm1_0 + Index;
1448   }
1449   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1450     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1451                   "Unexpected subreg numbering");
1452     return RISCV::sub_vrm2_0 + Index;
1453   }
1454   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1455     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1456                   "Unexpected subreg numbering");
1457     return RISCV::sub_vrm4_0 + Index;
1458   }
1459   llvm_unreachable("Invalid vector type.");
1460 }
1461 
1462 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1463   if (VT.getVectorElementType() == MVT::i1)
1464     return RISCV::VRRegClassID;
1465   return getRegClassIDForLMUL(getLMUL(VT));
1466 }
1467 
1468 // Attempt to decompose a subvector insert/extract between VecVT and
1469 // SubVecVT via subregister indices. Returns the subregister index that
1470 // can perform the subvector insert/extract with the given element index, as
1471 // well as the index corresponding to any leftover subvectors that must be
1472 // further inserted/extracted within the register class for SubVecVT.
1473 std::pair<unsigned, unsigned>
1474 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1475     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1476     const RISCVRegisterInfo *TRI) {
1477   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1478                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1479                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1480                 "Register classes not ordered");
1481   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1482   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
1486   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1487   // Note that this is not guaranteed to find a subregister index, such as
1488   // when we are extracting from one VR type to another.
1489   unsigned SubRegIdx = RISCV::NoSubRegister;
1490   for (const unsigned RCID :
1491        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1492     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1493       VecVT = VecVT.getHalfNumVectorElementsVT();
1494       bool IsHi =
1495           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1496       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1497                                             getSubregIndexByMVT(VecVT, IsHi));
1498       if (IsHi)
1499         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1500     }
1501   return {SubRegIdx, InsertExtractIdx};
1502 }
1503 
1504 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1505 // stores for those types.
1506 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1507   return !Subtarget.useRVVForFixedLengthVectors() ||
1508          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1509 }
1510 
1511 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1512   if (ScalarTy->isPointerTy())
1513     return true;
1514 
1515   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1516       ScalarTy->isIntegerTy(32))
1517     return true;
1518 
1519   if (ScalarTy->isIntegerTy(64))
1520     return Subtarget.hasVInstructionsI64();
1521 
1522   if (ScalarTy->isHalfTy())
1523     return Subtarget.hasVInstructionsF16();
1524   if (ScalarTy->isFloatTy())
1525     return Subtarget.hasVInstructionsF32();
1526   if (ScalarTy->isDoubleTy())
1527     return Subtarget.hasVInstructionsF64();
1528 
1529   return false;
1530 }
1531 
1532 static SDValue getVLOperand(SDValue Op) {
1533   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1534           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1535          "Unexpected opcode");
1536   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1537   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1538   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1539       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1540   if (!II)
1541     return SDValue();
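  // The table records where VL sits among the intrinsic's arguments; add 1 to
  // skip the intrinsic ID operand, plus 1 more for the chain when present.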
1542   return Op.getOperand(II->VLOperand + 1 + HasChain);
1543 }
1544 
1545 static bool useRVVForFixedLengthVectorVT(MVT VT,
1546                                          const RISCVSubtarget &Subtarget) {
1547   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1548   if (!Subtarget.useRVVForFixedLengthVectors())
1549     return false;
1550 
1551   // We only support a set of vector types with a consistent maximum fixed size
1552   // across all supported vector element types to avoid legalization issues.
1553   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1554   // fixed-length vector type we support is 1024 bytes.
1555   if (VT.getFixedSizeInBits() > 1024 * 8)
1556     return false;
1557 
1558   unsigned MinVLen = Subtarget.getRealMinVLen();
1559 
1560   MVT EltVT = VT.getVectorElementType();
1561 
1562   // Don't use RVV for vectors we cannot scalarize if required.
1563   switch (EltVT.SimpleTy) {
1564   // i1 is supported but has different rules.
1565   default:
1566     return false;
1567   case MVT::i1:
1568     // Masks can only use a single register.
1569     if (VT.getVectorNumElements() > MinVLen)
1570       return false;
1571     MinVLen /= 8;
1572     break;
1573   case MVT::i8:
1574   case MVT::i16:
1575   case MVT::i32:
1576     break;
1577   case MVT::i64:
1578     if (!Subtarget.hasVInstructionsI64())
1579       return false;
1580     break;
1581   case MVT::f16:
1582     if (!Subtarget.hasVInstructionsF16())
1583       return false;
1584     break;
1585   case MVT::f32:
1586     if (!Subtarget.hasVInstructionsF32())
1587       return false;
1588     break;
1589   case MVT::f64:
1590     if (!Subtarget.hasVInstructionsF64())
1591       return false;
1592     break;
1593   }
1594 
1595   // Reject elements larger than ELEN.
1596   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1597     return false;
1598 
1599   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1600   // Don't use RVV for types that don't fit.
1601   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1602     return false;
1603 
1604   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1605   // the base fixed length RVV support in place.
1606   if (!VT.isPow2VectorType())
1607     return false;
1608 
1609   return true;
1610 }
1611 
1612 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1613   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1614 }
1615 
1616 // Return the largest legal scalable vector type that matches VT's element type.
1617 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1618                                             const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
1620   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1621           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1622          "Expected legal fixed length vector!");
1623 
1624   unsigned MinVLen = Subtarget.getRealMinVLen();
1625   unsigned MaxELen = Subtarget.getELEN();
1626 
1627   MVT EltVT = VT.getVectorElementType();
1628   switch (EltVT.SimpleTy) {
1629   default:
1630     llvm_unreachable("unexpected element type for RVV container");
1631   case MVT::i1:
1632   case MVT::i8:
1633   case MVT::i16:
1634   case MVT::i32:
1635   case MVT::i64:
1636   case MVT::f16:
1637   case MVT::f32:
1638   case MVT::f64: {
1639     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1640     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1641     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
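    // For example, assuming VLEN=128 and ELEN=64, v4i32 (128 bits) maps to
    // nxv2i32 (LMUL=1), while v2i8 maps to nxv1i8 (the smallest fraction,
    // LMUL=1/8).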
1642     unsigned NumElts =
1643         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1644     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1645     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1646     return MVT::getScalableVectorVT(EltVT, NumElts);
1647   }
1648   }
1649 }
1650 
1651 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1652                                             const RISCVSubtarget &Subtarget) {
1653   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1654                                           Subtarget);
1655 }
1656 
1657 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1658   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1659 }
1660 
1661 // Grow V to consume an entire RVV register.
1662 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1663                                        const RISCVSubtarget &Subtarget) {
1664   assert(VT.isScalableVector() &&
1665          "Expected to convert into a scalable vector!");
1666   assert(V.getValueType().isFixedLengthVector() &&
1667          "Expected a fixed length vector operand!");
1668   SDLoc DL(V);
1669   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1670   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1671 }
1672 
1673 // Shrink V so it's just big enough to maintain a VT's worth of data.
1674 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1675                                          const RISCVSubtarget &Subtarget) {
1676   assert(VT.isFixedLengthVector() &&
1677          "Expected to convert into a fixed length vector!");
1678   assert(V.getValueType().isScalableVector() &&
1679          "Expected a scalable vector operand!");
1680   SDLoc DL(V);
1681   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1682   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1683 }
1684 
/// Return the mask type suitable for masking the provided vector type. This
/// is simply an i1 element type vector of the same (possibly scalable)
/// length.
1688 static MVT getMaskTypeFor(MVT VecVT) {
1689   assert(VecVT.isVector());
1690   ElementCount EC = VecVT.getVectorElementCount();
1691   return MVT::getVectorVT(MVT::i1, EC);
1692 }
1693 
1694 /// Creates an all ones mask suitable for masking a vector of type VecTy with
/// vector length VL.
1696 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
1697                               SelectionDAG &DAG) {
1698   MVT MaskVT = getMaskTypeFor(VecVT);
1699   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1700 }
1701 
1702 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1703 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1704 // the vector type that it is contained in.
1705 static std::pair<SDValue, SDValue>
1706 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1707                 const RISCVSubtarget &Subtarget) {
1708   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1709   MVT XLenVT = Subtarget.getXLenVT();
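  // A fixed-length vector uses its exact element count as the VL; scalable
  // vectors pass X0, which the vsetvli encoding treats as "use VLMAX".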
1710   SDValue VL = VecVT.isFixedLengthVector()
1711                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1712                    : DAG.getRegister(RISCV::X0, XLenVT);
1713   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
1714   return {Mask, VL};
1715 }
1716 
1717 // As above but assuming the given type is a scalable vector type.
1718 static std::pair<SDValue, SDValue>
1719 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1720                         const RISCVSubtarget &Subtarget) {
1721   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1722   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1723 }
1724 
1725 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either are (currently) supported. This can get us into an infinite loop
1727 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1728 // as a ..., etc.
1729 // Until either (or both) of these can reliably lower any node, reporting that
1730 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1731 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1732 // which is not desirable.
1733 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1734     EVT VT, unsigned DefinedValues) const {
1735   return false;
1736 }
1737 
1738 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1739                                   const RISCVSubtarget &Subtarget) {
1740   // RISCV FP-to-int conversions saturate to the destination register size, but
1741   // don't produce 0 for nan. We can use a conversion instruction and fix the
1742   // nan case with a compare and a select.
1743   SDValue Src = Op.getOperand(0);
1744 
1745   EVT DstVT = Op.getValueType();
1746   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1747 
1748   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1749   unsigned Opc;
1750   if (SatVT == DstVT)
1751     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1752   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1753     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1754   else
1755     return SDValue();
1756   // FIXME: Support other SatVTs by clamping before or after the conversion.
1757 
1758   SDLoc DL(Op);
1759   SDValue FpToInt = DAG.getNode(
1760       Opc, DL, DstVT, Src,
1761       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1762 
1763   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
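  // Src is unordered with itself only when it is NaN, so this SETUO select
  // produces 0 for NaN inputs and the converted value otherwise.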
1764   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1765 }
1766 
1767 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1768 // and back. Taking care to avoid converting values that are nan or already
1769 // correct.
1770 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1771 // have FRM dependencies modeled yet.
1772 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1773   MVT VT = Op.getSimpleValueType();
1774   assert(VT.isVector() && "Unexpected type");
1775 
1776   SDLoc DL(Op);
1777 
1778   // Freeze the source since we are increasing the number of uses.
1779   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1780 
1781   // Truncate to integer and convert back to FP.
1782   MVT IntVT = VT.changeVectorElementTypeToInteger();
1783   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1784   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1785 
1786   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1787 
1788   if (Op.getOpcode() == ISD::FCEIL) {
    // If the truncated value is greater than or equal to the original value,
    // we've computed the ceil. Otherwise, we went the wrong way and need to
    // increase by 1.
1792     // FIXME: This should use a masked operation. Handle here or in isel?
1793     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1794                                  DAG.getConstantFP(1.0, DL, VT));
1795     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1796     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1797   } else if (Op.getOpcode() == ISD::FFLOOR) {
    // If the truncated value is less than or equal to the original value,
    // we've computed the floor. Otherwise, we went the wrong way and need to
    // decrease by 1.
1801     // FIXME: This should use a masked operation. Handle here or in isel?
1802     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1803                                  DAG.getConstantFP(1.0, DL, VT));
1804     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1805     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1806   }
1807 
1808   // Restore the original sign so that -0.0 is preserved.
1809   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1810 
1811   // Determine the largest integer that can be represented exactly. This and
1812   // values larger than it don't have any fractional bits so don't need to
1813   // be converted.
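  // That bound is 2^(precision-1), e.g. 2^23 for f32; every f32 whose
  // magnitude is at least 2^23 is already an integer.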
1814   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1815   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1816   APFloat MaxVal = APFloat(FltSem);
1817   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1818                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1819   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1820 
1821   // If abs(Src) was larger than MaxVal or nan, keep it.
1822   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1823   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1824   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1825 }
1826 
1827 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1828 // This mode isn't supported in vector hardware on RISCV. But as long as we
1829 // aren't compiling with trapping math, we can emulate this with
1830 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1831 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1832 // dependencies modeled yet.
1833 // FIXME: Use masked operations to avoid final merge.
1834 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1835   MVT VT = Op.getSimpleValueType();
1836   assert(VT.isVector() && "Unexpected type");
1837 
1838   SDLoc DL(Op);
1839 
1840   // Freeze the source since we are increasing the number of uses.
1841   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1842 
1843   // We do the conversion on the absolute value and fix the sign at the end.
1844   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1845 
1846   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1847   bool Ignored;
1848   APFloat Point5Pred = APFloat(0.5f);
1849   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1850   Point5Pred.next(/*nextDown*/ true);
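  // Using the value just below 0.5 (rather than 0.5 itself) avoids rounding
  // up inputs slightly below 0.5: in f32, 0.49999997f + 0.5f rounds to 1.0f,
  // which would incorrectly round 0.49999997 away from zero.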
1851 
1852   // Add the adjustment.
1853   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1854                                DAG.getConstantFP(Point5Pred, DL, VT));
1855 
1856   // Truncate to integer and convert back to fp.
1857   MVT IntVT = VT.changeVectorElementTypeToInteger();
1858   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1859   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1860 
1861   // Restore the original sign.
1862   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1863 
1864   // Determine the largest integer that can be represented exactly. This and
1865   // values larger than it don't have any fractional bits so don't need to
1866   // be converted.
1867   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1868   APFloat MaxVal = APFloat(FltSem);
1869   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1870                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1871   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1872 
1873   // If abs(Src) was larger than MaxVal or nan, keep it.
1874   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1875   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1876   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1877 }
1878 
1879 struct VIDSequence {
1880   int64_t StepNumerator;
1881   unsigned StepDenominator;
1882   int64_t Addend;
1883 };
1884 
1885 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
1888 // The step S is represented as an integer numerator divided by a positive
1889 // denominator. Note that the implementation currently only identifies
1890 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1891 // cannot detect 2/3, for example.
1892 // Note that this method will also match potentially unappealing index
1893 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1894 // determine whether this is worth generating code for.
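// For example, <0,2,4,6> yields {StepNumerator=2, StepDenominator=1,
// Addend=0}, and <1,1,2,2> yields {StepNumerator=1, StepDenominator=2,
// Addend=1}, i.e. (VID / 2) + 1.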
1895 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1896   unsigned NumElts = Op.getNumOperands();
1897   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1898   if (!Op.getValueType().isInteger())
1899     return None;
1900 
1901   Optional<unsigned> SeqStepDenom;
1902   Optional<int64_t> SeqStepNum, SeqAddend;
1903   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1904   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1905   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1906     // Assume undef elements match the sequence; we just have to be careful
1907     // when interpolating across them.
1908     if (Op.getOperand(Idx).isUndef())
1909       continue;
1910     // The BUILD_VECTOR must be all constants.
1911     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1912       return None;
1913 
1914     uint64_t Val = Op.getConstantOperandVal(Idx) &
1915                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1916 
1917     if (PrevElt) {
1918       // Calculate the step since the last non-undef element, and ensure
1919       // it's consistent across the entire sequence.
1920       unsigned IdxDiff = Idx - PrevElt->second;
1921       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1922 
      // A zero value difference means that we're somewhere in the middle
1924       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1925       // step change before evaluating the sequence.
1926       if (ValDiff == 0)
1927         continue;
1928 
1929       int64_t Remainder = ValDiff % IdxDiff;
1930       // Normalize the step if it's greater than 1.
1931       if (Remainder != ValDiff) {
1932         // The difference must cleanly divide the element span.
1933         if (Remainder != 0)
1934           return None;
1935         ValDiff /= IdxDiff;
1936         IdxDiff = 1;
1937       }
1938 
1939       if (!SeqStepNum)
1940         SeqStepNum = ValDiff;
1941       else if (ValDiff != SeqStepNum)
1942         return None;
1943 
1944       if (!SeqStepDenom)
1945         SeqStepDenom = IdxDiff;
1946       else if (IdxDiff != *SeqStepDenom)
1947         return None;
1948     }
1949 
1950     // Record this non-undef element for later.
1951     if (!PrevElt || PrevElt->first != Val)
1952       PrevElt = std::make_pair(Val, Idx);
1953   }
1954 
1955   // We need to have logged a step for this to count as a legal index sequence.
1956   if (!SeqStepNum || !SeqStepDenom)
1957     return None;
1958 
1959   // Loop back through the sequence and validate elements we might have skipped
1960   // while waiting for a valid step. While doing this, log any sequence addend.
1961   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1962     if (Op.getOperand(Idx).isUndef())
1963       continue;
1964     uint64_t Val = Op.getConstantOperandVal(Idx) &
1965                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1966     uint64_t ExpectedVal =
1967         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1968     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1969     if (!SeqAddend)
1970       SeqAddend = Addend;
1971     else if (Addend != SeqAddend)
1972       return None;
1973   }
1974 
1975   assert(SeqAddend && "Must have an addend if we have a step");
1976 
1977   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1978 }
1979 
1980 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1981 // and lower it as a VRGATHER_VX_VL from the source vector.
1982 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1983                                   SelectionDAG &DAG,
1984                                   const RISCVSubtarget &Subtarget) {
1985   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1986     return SDValue();
1987   SDValue Vec = SplatVal.getOperand(0);
1988   // Only perform this optimization on vectors of the same size for simplicity.
1989   // Don't perform this optimization for i1 vectors.
1990   // FIXME: Support i1 vectors, maybe by promoting to i8?
1991   if (Vec.getValueType() != VT || VT.getVectorElementType() == MVT::i1)
1992     return SDValue();
1993   SDValue Idx = SplatVal.getOperand(1);
1994   // The index must be a legal type.
1995   if (Idx.getValueType() != Subtarget.getXLenVT())
1996     return SDValue();
1997 
1998   MVT ContainerVT = VT;
1999   if (VT.isFixedLengthVector()) {
2000     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2001     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2002   }
2003 
2004   SDValue Mask, VL;
2005   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2006 
2007   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2008                                Idx, Mask, DAG.getUNDEF(ContainerVT), VL);
2009 
2010   if (!VT.isFixedLengthVector())
2011     return Gather;
2012 
2013   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2014 }
2015 
2016 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2017                                  const RISCVSubtarget &Subtarget) {
2018   MVT VT = Op.getSimpleValueType();
2019   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2020 
2021   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2022 
2023   SDLoc DL(Op);
2024   SDValue Mask, VL;
2025   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2026 
2027   MVT XLenVT = Subtarget.getXLenVT();
2028   unsigned NumElts = Op.getNumOperands();
2029 
2030   if (VT.getVectorElementType() == MVT::i1) {
2031     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2032       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2033       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2034     }
2035 
2036     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2037       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2038       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2039     }
2040 
2041     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2042     // scalar integer chunks whose bit-width depends on the number of mask
2043     // bits and XLEN.
2044     // First, determine the most appropriate scalar integer type to use. This
2045     // is at most XLenVT, but may be shrunk to a smaller vector element type
2046     // according to the size of the final vector - use i8 chunks rather than
2047     // XLenVT if we're producing a v8i1. This results in more consistent
2048     // codegen across RV32 and RV64.
2049     unsigned NumViaIntegerBits =
2050         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2051     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
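    // For example (assuming ELEN >= 32), a v8i1 uses a single i8 chunk
    // (v1i8), while a v64i1 on RV32 uses two i32 chunks (v2i32).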
2052     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. We can use a load from a constant pool instead.
2056       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2057         return SDValue();
2058       // Now we can create our integer vector type. Note that it may be larger
2059       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2060       MVT IntegerViaVecVT =
2061           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2062                            divideCeil(NumElts, NumViaIntegerBits));
2063 
2064       uint64_t Bits = 0;
2065       unsigned BitPos = 0, IntegerEltIdx = 0;
2066       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2067 
2068       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2069         // Once we accumulate enough bits to fill our scalar type, insert into
2070         // our vector and clear our accumulated data.
2071         if (I != 0 && I % NumViaIntegerBits == 0) {
2072           if (NumViaIntegerBits <= 32)
2073             Bits = SignExtend64<32>(Bits);
2074           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2075           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2076                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2077           Bits = 0;
2078           BitPos = 0;
2079           IntegerEltIdx++;
2080         }
2081         SDValue V = Op.getOperand(I);
2082         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2083         Bits |= ((uint64_t)BitValue << BitPos);
2084       }
2085 
2086       // Insert the (remaining) scalar value into position in our integer
2087       // vector type.
2088       if (NumViaIntegerBits <= 32)
2089         Bits = SignExtend64<32>(Bits);
2090       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2091       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2092                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2093 
2094       if (NumElts < NumViaIntegerBits) {
2095         // If we're producing a smaller vector than our minimum legal integer
2096         // type, bitcast to the equivalent (known-legal) mask type, and extract
2097         // our final mask.
2098         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2099         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2100         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2101                           DAG.getConstant(0, DL, XLenVT));
2102       } else {
2103         // Else we must have produced an integer type with the same size as the
2104         // mask type; bitcast for the final result.
2105         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2106         Vec = DAG.getBitcast(VT, Vec);
2107       }
2108 
2109       return Vec;
2110     }
2111 
2112     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2113     // vector type, we have a legal equivalently-sized i8 type, so we can use
2114     // that.
2115     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2116     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2117 
2118     SDValue WideVec;
2119     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2120       // For a splat, perform a scalar truncate before creating the wider
2121       // vector.
2122       assert(Splat.getValueType() == XLenVT &&
2123              "Unexpected type for i1 splat value");
2124       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2125                           DAG.getConstant(1, DL, XLenVT));
2126       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2127     } else {
2128       SmallVector<SDValue, 8> Ops(Op->op_values());
2129       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2130       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2131       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2132     }
2133 
2134     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2135   }
2136 
2137   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2138     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2139       return Gather;
2140     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2141                                         : RISCVISD::VMV_V_X_VL;
2142     Splat =
2143         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2144     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2145   }
2146 
2147   // Try and match index sequences, which we can lower to the vid instruction
2148   // with optional modifications. An all-undef vector is matched by
2149   // getSplatValue, above.
2150   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2151     int64_t StepNumerator = SimpleVID->StepNumerator;
2152     unsigned StepDenominator = SimpleVID->StepDenominator;
2153     int64_t Addend = SimpleVID->Addend;
2154 
2155     assert(StepNumerator != 0 && "Invalid step");
2156     bool Negate = false;
2157     int64_t SplatStepVal = StepNumerator;
2158     unsigned StepOpcode = ISD::MUL;
2159     if (StepNumerator != 1) {
2160       if (isPowerOf2_64(std::abs(StepNumerator))) {
2161         Negate = StepNumerator < 0;
2162         StepOpcode = ISD::SHL;
2163         SplatStepVal = Log2_64(std::abs(StepNumerator));
2164       }
2165     }
2166 
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
2169     // There is no vmul.vi instruction so ensure multiply constant can fit in
2170     // a single addi instruction.
2171     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2172          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2173         isPowerOf2_32(StepDenominator) &&
2174         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
2175       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2176       // Convert right out of the scalable type so we can use standard ISD
2177       // nodes for the rest of the computation. If we used scalable types with
2178       // these, we'd lose the fixed-length vector info and generate worse
2179       // vsetvli code.
2180       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2181       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2182           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2183         SDValue SplatStep = DAG.getSplatBuildVector(
2184             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2185         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2186       }
2187       if (StepDenominator != 1) {
2188         SDValue SplatStep = DAG.getSplatBuildVector(
2189             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2190         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2191       }
2192       if (Addend != 0 || Negate) {
2193         SDValue SplatAddend = DAG.getSplatBuildVector(
2194             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend,
                          VID);
2196       }
2197       return VID;
2198     }
2199   }
2200 
2201   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2202   // when re-interpreted as a vector with a larger element type. For example,
2203   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2204   // could be instead splat as
2205   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2206   // TODO: This optimization could also work on non-constant splats, but it
2207   // would require bit-manipulation instructions to construct the splat value.
2208   SmallVector<SDValue> Sequence;
2209   unsigned EltBitSize = VT.getScalarSizeInBits();
2210   const auto *BV = cast<BuildVectorSDNode>(Op);
2211   if (VT.isInteger() && EltBitSize < 64 &&
2212       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2213       BV->getRepeatedSequence(Sequence) &&
2214       (Sequence.size() * EltBitSize) <= 64) {
2215     unsigned SeqLen = Sequence.size();
2216     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2217     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2218     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2219             ViaIntVT == MVT::i64) &&
2220            "Unexpected sequence type");
2221 
2222     unsigned EltIdx = 0;
2223     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2224     uint64_t SplatValue = 0;
2225     // Construct the amalgamated value which can be splatted as this larger
2226     // vector type.
2227     for (const auto &SeqV : Sequence) {
2228       if (!SeqV.isUndef())
2229         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2230                        << (EltIdx * EltBitSize));
2231       EltIdx++;
2232     }
2233 
2234     // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
2236     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2237       SplatValue = SignExtend64<32>(SplatValue);
2238 
2239     // Since we can't introduce illegal i64 types at this stage, we can only
2240     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2241     // way we can use RVV instructions to splat.
2242     assert((ViaIntVT.bitsLE(XLenVT) ||
2243             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2244            "Unexpected bitcast sequence");
2245     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2246       SDValue ViaVL =
2247           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2248       MVT ViaContainerVT =
2249           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2250       SDValue Splat =
2251           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2252                       DAG.getUNDEF(ViaContainerVT),
2253                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2254       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2255       return DAG.getBitcast(VT, Splat);
2256     }
2257   }
2258 
2259   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2260   // which constitute a large proportion of the elements. In such cases we can
2261   // splat a vector with the dominant element and make up the shortfall with
2262   // INSERT_VECTOR_ELTs.
2263   // Note that this includes vectors of 2 elements by association. The
2264   // upper-most element is the "dominant" one, allowing us to use a splat to
2265   // "insert" the upper element, and an insert of the lower element at position
2266   // 0, which improves codegen.
2267   SDValue DominantValue;
2268   unsigned MostCommonCount = 0;
2269   DenseMap<SDValue, unsigned> ValueCounts;
2270   unsigned NumUndefElts =
2271       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2272 
2273   // Track the number of scalar loads we know we'd be inserting, estimated as
2274   // any non-zero floating-point constant. Other kinds of element are either
2275   // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materialization and
2277   // vector-insertion instructions is not known.
2278   unsigned NumScalarLoads = 0;
2279 
2280   for (SDValue V : Op->op_values()) {
2281     if (V.isUndef())
2282       continue;
2283 
2284     ValueCounts.insert(std::make_pair(V, 0));
2285     unsigned &Count = ValueCounts[V];
2286 
2287     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2288       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2289 
2290     // Is this value dominant? In case of a tie, prefer the highest element as
2291     // it's cheaper to insert near the beginning of a vector than it is at the
2292     // end.
2293     if (++Count >= MostCommonCount) {
2294       DominantValue = V;
2295       MostCommonCount = Count;
2296     }
2297   }
2298 
2299   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2300   unsigned NumDefElts = NumElts - NumUndefElts;
2301   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
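  // With this threshold, a value is "dominant" when at most one other defined
  // element differs from it; the check below also allows the splat when the
  // number of distinct values is small (at most log2 of NumDefElts).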
2302 
2303   // Don't perform this optimization when optimizing for size, since
2304   // materializing elements and inserting them tends to cause code bloat.
2305   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2306       ((MostCommonCount > DominantValueCountThreshold) ||
2307        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2308     // Start by splatting the most common element.
2309     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2310 
2311     DenseSet<SDValue> Processed{DominantValue};
2312     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2313     for (const auto &OpIdx : enumerate(Op->ops())) {
2314       const SDValue &V = OpIdx.value();
2315       if (V.isUndef() || !Processed.insert(V).second)
2316         continue;
2317       if (ValueCounts[V] == 1) {
2318         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2319                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2320       } else {
2321         // Blend in all instances of this value using a VSELECT, using a
2322         // mask where each bit signals whether that element is the one
2323         // we're after.
2324         SmallVector<SDValue> Ops;
2325         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2326           return DAG.getConstant(V == V1, DL, XLenVT);
2327         });
2328         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2329                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2330                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2331       }
2332     }
2333 
2334     return Vec;
2335   }
2336 
2337   return SDValue();
2338 }
2339 
2340 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2341                                    SDValue Lo, SDValue Hi, SDValue VL,
2342                                    SelectionDAG &DAG) {
2343   if (!Passthru)
2344     Passthru = DAG.getUNDEF(VT);
2345   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2346     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2347     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2348     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2349     // node in order to try and match RVV vector/scalar instructions.
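    // (LoC >> 31) replicates Lo's sign bit, so this checks that the i64 value
    // is the sign-extension of its low 32 bits.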
2350     if ((LoC >> 31) == HiC)
2351       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2352 
    // If vl is equal to VLMAX and Hi constant is equal to Lo, we could use
2354     // vmv.v.x whose EEW = 32 to lower it.
2355     auto *Const = dyn_cast<ConstantSDNode>(VL);
2356     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2357       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
      // TODO: If vl <= min(VLMAX), we can also do this, but we have no way to
      // access the subtarget from here.
      auto InterVec = DAG.getNode(
          RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
          DAG.getRegister(RISCV::X0, MVT::i32));
2363       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2364     }
2365   }
2366 
2367   // Fall back to a stack store and stride x0 vector load.
2368   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2369                      Hi, VL);
2370 }
2371 
2372 // Called by type legalization to handle splat of i64 on RV32.
2373 // FIXME: We can optimize this when the type has sign or zero bits in one
2374 // of the halves.
2375 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2376                                    SDValue Scalar, SDValue VL,
2377                                    SelectionDAG &DAG) {
2378   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2379   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2380                            DAG.getConstant(0, DL, MVT::i32));
2381   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2382                            DAG.getConstant(1, DL, MVT::i32));
2383   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2384 }
2385 
2386 // This function lowers a splat of a scalar operand Splat with the vector
2387 // length VL. It ensures the final sequence is type legal, which is useful when
2388 // lowering a splat after type legalization.
2389 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2390                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2391                                 const RISCVSubtarget &Subtarget) {
2392   bool HasPassthru = Passthru && !Passthru.isUndef();
2393   if (!HasPassthru && !Passthru)
2394     Passthru = DAG.getUNDEF(VT);
2395   if (VT.isFloatingPoint()) {
2396     // If VL is 1, we could use vfmv.s.f.
2397     if (isOneConstant(VL))
2398       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2399     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2400   }
2401 
2402   MVT XLenVT = Subtarget.getXLenVT();
2403 
2404   // Simplest case is that the operand needs to be promoted to XLenVT.
2405   if (Scalar.getValueType().bitsLE(XLenVT)) {
2406     // If the operand is a constant, sign extend to increase our chances
2407     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
2409     // FIXME: Should we ignore the upper bits in isel instead?
2410     unsigned ExtOpc =
2411         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2412     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2413     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2414     // If VL is 1 and the scalar value won't benefit from immediate, we could
2415     // use vmv.s.x.
2416     if (isOneConstant(VL) &&
2417         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2418       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2419     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2420   }
2421 
2422   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2423          "Unexpected scalar for splat lowering!");
2424 
2425   if (isOneConstant(VL) && isNullConstant(Scalar))
2426     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2427                        DAG.getConstant(0, DL, XLenVT), VL);
2428 
2429   // Otherwise use the more complicated splatting algorithm.
2430   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2431 }
2432 
2433 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2434                                 const RISCVSubtarget &Subtarget) {
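  // Look for a mask that interleaves the low halves of the two sources, e.g.
  // <0, 8, 1, 9, 2, 10, 3, 11> for two v8 inputs, where the even and odd
  // result elements each come consistently from a single source.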
2435   // We need to be able to widen elements to the next larger integer type.
2436   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2437     return false;
2438 
2439   int Size = Mask.size();
2440   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2441 
2442   int Srcs[] = {-1, -1};
2443   for (int i = 0; i != Size; ++i) {
2444     // Ignore undef elements.
2445     if (Mask[i] < 0)
2446       continue;
2447 
    // Is this an even or odd element?
2449     int Pol = i % 2;
2450 
2451     // Ensure we consistently use the same source for this element polarity.
2452     int Src = Mask[i] / Size;
2453     if (Srcs[Pol] < 0)
2454       Srcs[Pol] = Src;
2455     if (Srcs[Pol] != Src)
2456       return false;
2457 
2458     // Make sure the element within the source is appropriate for this element
2459     // in the destination.
2460     int Elt = Mask[i] % Size;
2461     if (Elt != i / 2)
2462       return false;
2463   }
2464 
2465   // We need to find a source for each polarity and they can't be the same.
2466   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2467     return false;
2468 
2469   // Swap the sources if the second source was in the even polarity.
2470   SwapSources = Srcs[0] > Srcs[1];
2471 
2472   return true;
2473 }
2474 
2475 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2476 /// and then extract the original number of elements from the rotated result.
2477 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2478 /// returned rotation amount is for a rotate right, where elements move from
2479 /// higher elements to lower elements. \p LoSrc indicates the first source
2480 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2481 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2482 /// 0 or 1 if a rotation is found.
2483 ///
2484 /// NOTE: We talk about rotate to the right which matches how bit shift and
2485 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2486 /// and the table below write vectors with the lowest elements on the left.
2487 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2488   int Size = Mask.size();
2489 
2490   // We need to detect various ways of spelling a rotation:
2491   //   [11, 12, 13, 14, 15,  0,  1,  2]
2492   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2493   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2494   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2495   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2496   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2497   int Rotation = 0;
2498   LoSrc = -1;
2499   HiSrc = -1;
2500   for (int i = 0; i != Size; ++i) {
2501     int M = Mask[i];
2502     if (M < 0)
2503       continue;
2504 
2505     // Determine where a rotate vector would have started.
2506     int StartIdx = i - (M % Size);
2507     // The identity rotation isn't interesting, stop.
2508     if (StartIdx == 0)
2509       return -1;
2510 
2511     // If we found the tail of a vector the rotation must be the missing
2512     // front. If we found the head of a vector, it must be how much of the
2513     // head.
2514     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2515 
2516     if (Rotation == 0)
2517       Rotation = CandidateRotation;
2518     else if (Rotation != CandidateRotation)
2519       // The rotations don't match, so we can't match this mask.
2520       return -1;
2521 
2522     // Compute which value this mask is pointing at.
2523     int MaskSrc = M < Size ? 0 : 1;
2524 
2525     // Compute which of the two target values this index should be assigned to.
    // This reflects whether the high elements are remaining or the low
    // elements are remaining.
2528     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2529 
2530     // Either set up this value if we've not encountered it before, or check
2531     // that it remains consistent.
2532     if (TargetSrc < 0)
2533       TargetSrc = MaskSrc;
2534     else if (TargetSrc != MaskSrc)
2535       // This may be a rotation, but it pulls from the inputs in some
2536       // unsupported interleaving.
2537       return -1;
2538   }
2539 
2540   // Check that we successfully analyzed the mask, and normalize the results.
2541   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2542   assert((LoSrc >= 0 || HiSrc >= 0) &&
2543          "Failed to find a rotated input vector!");
2544 
2545   return Rotation;
2546 }
2547 
2548 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2549                                    const RISCVSubtarget &Subtarget) {
2550   SDValue V1 = Op.getOperand(0);
2551   SDValue V2 = Op.getOperand(1);
2552   SDLoc DL(Op);
2553   MVT XLenVT = Subtarget.getXLenVT();
2554   MVT VT = Op.getSimpleValueType();
2555   unsigned NumElts = VT.getVectorNumElements();
2556   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2557 
2558   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2559 
2560   SDValue TrueMask, VL;
2561   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2562 
2563   if (SVN->isSplat()) {
2564     const int Lane = SVN->getSplatIndex();
2565     if (Lane >= 0) {
2566       MVT SVT = VT.getVectorElementType();
2567 
2568       // Turn splatted vector load into a strided load with an X0 stride.
2569       SDValue V = V1;
2570       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2571       // with undef.
2572       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2573       int Offset = Lane;
2574       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2575         int OpElements =
2576             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2577         V = V.getOperand(Offset / OpElements);
2578         Offset %= OpElements;
2579       }
2580 
2581       // We need to ensure the load isn't atomic or volatile.
2582       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2583         auto *Ld = cast<LoadSDNode>(V);
2584         Offset *= SVT.getStoreSize();
2585         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2586                                                    TypeSize::Fixed(Offset), DL);
2587 
2588         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
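        // A stride of x0 (zero) makes every element load from the same
        // address, so the load itself performs the splat and no 64-bit scalar
        // value is needed in a GPR on RV32.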
2589         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2590           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2591           SDValue IntID =
2592               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2593           SDValue Ops[] = {Ld->getChain(),
2594                            IntID,
2595                            DAG.getUNDEF(ContainerVT),
2596                            NewAddr,
2597                            DAG.getRegister(RISCV::X0, XLenVT),
2598                            VL};
2599           SDValue NewLoad = DAG.getMemIntrinsicNode(
2600               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2601               DAG.getMachineFunction().getMachineMemOperand(
2602                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2603           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2604           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2605         }
2606 
2607         // Otherwise use a scalar load and splat. This will give the best
2608         // opportunity to fold a splat into the operation. ISel can turn it into
2609         // the x0 strided load if we aren't able to fold away the select.
2610         if (SVT.isFloatingPoint())
2611           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2612                           Ld->getPointerInfo().getWithOffset(Offset),
2613                           Ld->getOriginalAlign(),
2614                           Ld->getMemOperand()->getFlags());
2615         else
2616           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2617                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2618                              Ld->getOriginalAlign(),
2619                              Ld->getMemOperand()->getFlags());
2620         DAG.makeEquivalentMemoryOrdering(Ld, V);
2621 
2622         unsigned Opc =
2623             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2624         SDValue Splat =
2625             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2626         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2627       }
2628 
2629       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2630       assert(Lane < (int)NumElts && "Unexpected lane!");
2631       SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT,
2632                                    V1, DAG.getConstant(Lane, DL, XLenVT),
2633                                    TrueMask, DAG.getUNDEF(ContainerVT), VL);
2634       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2635     }
2636   }
2637 
2638   ArrayRef<int> Mask = SVN->getMask();
2639 
  // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors
  // may be undef, in which case a single SLIDEDOWN/SLIDEUP suffices.
2642   int LoSrc, HiSrc;
2643   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2644   if (Rotation > 0) {
2645     SDValue LoV, HiV;
2646     if (LoSrc >= 0) {
2647       LoV = LoSrc == 0 ? V1 : V2;
2648       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2649     }
2650     if (HiSrc >= 0) {
2651       HiV = HiSrc == 0 ? V1 : V2;
2652       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2653     }
2654 
2655     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2656     // to slide LoV up by (NumElts - Rotation).
2657     unsigned InvRotate = NumElts - Rotation;
2658 
2659     SDValue Res = DAG.getUNDEF(ContainerVT);
2660     if (HiV) {
2661       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2662       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2663       // causes multiple vsetvlis in some test cases such as lowering
2664       // reduce.mul
2665       SDValue DownVL = VL;
2666       if (LoV)
2667         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2668       Res =
2669           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2670                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2671     }
2672     if (LoV)
2673       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2674                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2675 
2676     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2677   }
2678 
2679   // Detect an interleave shuffle and lower to
2680   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
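  // This works because lohalf(V1) + (2^eltbits) * lohalf(V2), computed in the
  // widened type, places V2's element in the high half of each wide element
  // and V1's in the low half; bitcasting back to the narrow type yields the
  // interleaved elements. E.g. for i8 elements V1[0] = 0x34 and V2[0] = 0x12:
  //   vwaddu:  0x0034 + 0x0012      = 0x0046
  //   vwmaccu: 0x0046 + 0xff * 0x12 = 0x1234, i.e. [0x34, 0x12] as i8s.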
2681   bool SwapSources;
2682   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2683     // Swap sources if needed.
2684     if (SwapSources)
2685       std::swap(V1, V2);
2686 
2687     // Extract the lower half of the vectors.
2688     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2689     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2690                      DAG.getConstant(0, DL, XLenVT));
2691     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2692                      DAG.getConstant(0, DL, XLenVT));
2693 
2694     // Double the element width and halve the number of elements in an int type.
2695     unsigned EltBits = VT.getScalarSizeInBits();
2696     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2697     MVT WideIntVT =
2698         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2699     // Convert this to a scalable vector. We need to base this on the
2700     // destination size to ensure there's always a type with a smaller LMUL.
2701     MVT WideIntContainerVT =
2702         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2703 
2704     // Convert sources to scalable vectors with the same element count as the
2705     // larger type.
2706     MVT HalfContainerVT = MVT::getVectorVT(
2707         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2708     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2709     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2710 
2711     // Cast sources to integer.
2712     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2713     MVT IntHalfVT =
2714         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2715     V1 = DAG.getBitcast(IntHalfVT, V1);
2716     V2 = DAG.getBitcast(IntHalfVT, V2);
2717 
2718     // Freeze V2 since we use it twice and we need to be sure that the add and
2719     // multiply see the same value.
2720     V2 = DAG.getFreeze(V2);
2721 
2722     // Recreate TrueMask using the widened type's element count.
2723     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2724 
2725     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2726     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2727                               V2, TrueMask, VL);
2728     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2729     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2730                                      DAG.getUNDEF(IntHalfVT),
2731                                      DAG.getAllOnesConstant(DL, XLenVT));
2732     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2733                                    V2, Multiplier, TrueMask, VL);
2734     // Add the new copies to our previous addition giving us 2^eltbits copies of
2735     // V2. This is equivalent to shifting V2 left by eltbits. This should
2736     // combine with the vwmulu.vv above to form vwmaccu.vv.
2737     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2738                       TrueMask, VL);
2739     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2740     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2741     // vector VT.
2742     ContainerVT =
2743         MVT::getVectorVT(VT.getVectorElementType(),
2744                          WideIntContainerVT.getVectorElementCount() * 2);
2745     Add = DAG.getBitcast(ContainerVT, Add);
2746     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2747   }
2748 
  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from the
  // element at the corresponding index in one of the two source vectors.
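  // E.g. with NumElts == 4, the mask [0, 5, 2, 7] is a select: result element
  // i is either V1[i] or V2[i].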
2752   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2753     int MaskIndex = MaskIdx.value();
2754     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2755   });
2756 
2757   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2758 
2759   SmallVector<SDValue> MaskVals;
2760   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2761   // merged with a second vrgather.
2762   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2763 
2764   // By default we preserve the original operand order, and use a mask to
2765   // select LHS as true and RHS as false. However, since RVV vector selects may
2766   // feature splats but only on the LHS, we may choose to invert our mask and
2767   // instead select between RHS and LHS.
2768   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2769   bool InvertMask = IsSelect == SwapOps;
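  // (In the vrgather path below, the mask is instead used to merge the second
  // gather into the first, so set bits mark lanes taken from the RHS; that
  // opposite polarity is why the mask is inverted when IsSelect is false.)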
2770 
  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
  // half.
2773   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2774 
2775   // Now construct the mask that will be used by the vselect or blended
2776   // vrgather operation. For vrgathers, construct the appropriate indices into
2777   // each vector.
2778   for (int MaskIndex : Mask) {
2779     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2780     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2781     if (!IsSelect) {
2782       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2783       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2784                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2785                                      : DAG.getUNDEF(XLenVT));
2786       GatherIndicesRHS.push_back(
2787           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2788                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2789       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2790         ++LHSIndexCounts[MaskIndex];
2791       if (!IsLHSOrUndefIndex)
2792         ++RHSIndexCounts[MaskIndex - NumElts];
2793     }
2794   }
2795 
2796   if (SwapOps) {
2797     std::swap(V1, V2);
2798     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2799   }
2800 
2801   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2802   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2803   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2804 
2805   if (IsSelect)
2806     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2807 
2808   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2809     // On such a large vector we're unable to use i8 as the index type.
2810     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2811     // may involve vector splitting if we're already at LMUL=8, or our
2812     // user-supplied maximum fixed-length LMUL.
2813     return SDValue();
2814   }
2815 
2816   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2817   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2818   MVT IndexVT = VT.changeTypeToInteger();
2819   // Since we can't introduce illegal index types at this stage, use i16 and
2820   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2821   // than XLenVT.
2822   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2823     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2824     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2825   }
2826 
2827   MVT IndexContainerVT =
2828       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2829 
2830   SDValue Gather;
2831   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2832   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2833   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2834     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2835                               Subtarget);
2836   } else {
2837     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2838     // If only one index is used, we can use a "splat" vrgather.
2839     // TODO: We can splat the most-common index and fix-up any stragglers, if
2840     // that's beneficial.
2841     if (LHSIndexCounts.size() == 1) {
2842       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2843       Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2844                            DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask,
2845                            DAG.getUNDEF(ContainerVT), VL);
2846     } else {
2847       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2848       LHSIndices =
2849           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2850 
2851       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2852                            TrueMask, DAG.getUNDEF(ContainerVT), VL);
2853     }
2854   }
2855 
2856   // If a second vector operand is used by this shuffle, blend it in with an
2857   // additional vrgather.
2858   if (!V2.isUndef()) {
2859     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2860 
2861     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2862     SelectMask =
2863         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2864 
2865     // If only one index is used, we can use a "splat" vrgather.
2866     // TODO: We can splat the most-common index and fix-up any stragglers, if
2867     // that's beneficial.
2868     if (RHSIndexCounts.size() == 1) {
2869       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2870       Gather = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2871                            DAG.getConstant(SplatIndex, DL, XLenVT), SelectMask,
2872                            Gather, VL);
2873     } else {
2874       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2875       RHSIndices =
2876           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2877       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices,
2878                            SelectMask, Gather, VL);
2879     }
2880   }
2881 
2882   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2883 }
2884 
2885 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2886   // Support splats for any type. These should type legalize well.
2887   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2888     return true;
2889 
2890   // Only support legal VTs for other shuffles for now.
2891   if (!isTypeLegal(VT))
2892     return false;
2893 
2894   MVT SVT = VT.getSimpleVT();
2895 
2896   bool SwapSources;
2897   int LoSrc, HiSrc;
2898   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2899          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2900 }
2901 
2902 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2903 // the exponent.
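// E.g. for an i16 element X = 8: for CTTZ, X & -X = 8, and 8.0f has a biased
// exponent field of 130, giving 130 - 127 = 3 trailing zeros. For CTLZ there
// are 15 - log2(8) = 12 leading zeros, computed as (127 + 15) - 130 = 12.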
2904 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2905   MVT VT = Op.getSimpleValueType();
2906   unsigned EltSize = VT.getScalarSizeInBits();
2907   SDValue Src = Op.getOperand(0);
2908   SDLoc DL(Op);
2909 
2910   // We need a FP type that can represent the value.
2911   // TODO: Use f16 for i8 when possible?
2912   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2913   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2914 
2915   // Legal types should have been checked in the RISCVTargetLowering
2916   // constructor.
2917   // TODO: Splitting may make sense in some cases.
2918   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2919          "Expected legal float type!");
2920 
2921   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2922   // The trailing zero count is equal to log2 of this single bit value.
2923   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2924     SDValue Neg =
2925         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2926     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2927   }
2928 
2929   // We have a legal FP type, convert to it.
2930   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2931   // Bitcast to integer and shift the exponent to the LSB.
2932   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2933   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2934   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2935   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2936                               DAG.getConstant(ShiftAmt, DL, IntVT));
2937   // Truncate back to original type to allow vnsrl.
2938   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2939   // The exponent contains log2 of the value in biased form.
2940   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2941 
2942   // For trailing zeros, we just need to subtract the bias.
2943   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2944     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2945                        DAG.getConstant(ExponentBias, DL, VT));
2946 
2947   // For leading zeros, we need to remove the bias and convert from log2 to
2948   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2949   unsigned Adjust = ExponentBias + (EltSize - 1);
2950   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2951 }
2952 
// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly aligned, it returns SDValue().
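// E.g. an align-1 load of <vscale x 4 x i16> is re-expressed as an align-1
// load of <vscale x 8 x i8> followed by a bitcast to the original type.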
2957 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2958                                                     SelectionDAG &DAG) const {
2959   auto *Load = cast<LoadSDNode>(Op);
2960   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2961 
2962   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2963                                      Load->getMemoryVT(),
2964                                      *Load->getMemOperand()))
2965     return SDValue();
2966 
2967   SDLoc DL(Op);
2968   MVT VT = Op.getSimpleValueType();
2969   unsigned EltSizeBits = VT.getScalarSizeInBits();
2970   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2971          "Unexpected unaligned RVV load type");
2972   MVT NewVT =
2973       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2974   assert(NewVT.isValid() &&
2975          "Expecting equally-sized RVV vector types to be legal");
2976   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2977                           Load->getPointerInfo(), Load->getOriginalAlign(),
2978                           Load->getMemOperand()->getFlags());
2979   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2980 }
2981 
// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
2986 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2987                                                      SelectionDAG &DAG) const {
2988   auto *Store = cast<StoreSDNode>(Op);
2989   assert(Store && Store->getValue().getValueType().isVector() &&
2990          "Expected vector store");
2991 
2992   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2993                                      Store->getMemoryVT(),
2994                                      *Store->getMemOperand()))
2995     return SDValue();
2996 
2997   SDLoc DL(Op);
2998   SDValue StoredVal = Store->getValue();
2999   MVT VT = StoredVal.getSimpleValueType();
3000   unsigned EltSizeBits = VT.getScalarSizeInBits();
3001   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3002          "Unexpected unaligned RVV store type");
3003   MVT NewVT =
3004       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3005   assert(NewVT.isValid() &&
3006          "Expecting equally-sized RVV vector types to be legal");
3007   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3008   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3009                       Store->getPointerInfo(), Store->getOriginalAlign(),
3010                       Store->getMemOperand()->getFlags());
3011 }
3012 
3013 static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
3014                              const RISCVSubtarget &Subtarget) {
3015   assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
3016 
3017   int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();
3018 
  // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
  // this check redundant, but small immediates are common, so checking them
  // first improves compile time.
3023   if (isInt<32>(Imm))
3024     return Op;
3025 
  // We only need to cost the immediate if constant pool lowering is enabled.
3027   if (!Subtarget.useConstantPoolForLargeInts())
3028     return Op;
3029 
3030   RISCVMatInt::InstSeq Seq =
3031       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
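  // On RV64 without Zba/Zbs, a pathological 64-bit constant can take up to 8
  // instructions (lui+addi plus slli+addi pairs), in which case a constant
  // pool load may well be cheaper.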
3032   if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
3033     return Op;
3034 
3035   // Expand to a constant pool using the default expansion code.
3036   return SDValue();
3037 }
3038 
3039 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3040                                             SelectionDAG &DAG) const {
3041   switch (Op.getOpcode()) {
3042   default:
3043     report_fatal_error("unimplemented operand");
3044   case ISD::GlobalAddress:
3045     return lowerGlobalAddress(Op, DAG);
3046   case ISD::BlockAddress:
3047     return lowerBlockAddress(Op, DAG);
3048   case ISD::ConstantPool:
3049     return lowerConstantPool(Op, DAG);
3050   case ISD::JumpTable:
3051     return lowerJumpTable(Op, DAG);
3052   case ISD::GlobalTLSAddress:
3053     return lowerGlobalTLSAddress(Op, DAG);
3054   case ISD::Constant:
3055     return lowerConstant(Op, DAG, Subtarget);
3056   case ISD::SELECT:
3057     return lowerSELECT(Op, DAG);
3058   case ISD::BRCOND:
3059     return lowerBRCOND(Op, DAG);
3060   case ISD::VASTART:
3061     return lowerVASTART(Op, DAG);
3062   case ISD::FRAMEADDR:
3063     return lowerFRAMEADDR(Op, DAG);
3064   case ISD::RETURNADDR:
3065     return lowerRETURNADDR(Op, DAG);
3066   case ISD::SHL_PARTS:
3067     return lowerShiftLeftParts(Op, DAG);
3068   case ISD::SRA_PARTS:
3069     return lowerShiftRightParts(Op, DAG, true);
3070   case ISD::SRL_PARTS:
3071     return lowerShiftRightParts(Op, DAG, false);
3072   case ISD::BITCAST: {
3073     SDLoc DL(Op);
3074     EVT VT = Op.getValueType();
3075     SDValue Op0 = Op.getOperand(0);
3076     EVT Op0VT = Op0.getValueType();
3077     MVT XLenVT = Subtarget.getXLenVT();
3078     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3079       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3080       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3081       return FPConv;
3082     }
3083     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3084         Subtarget.hasStdExtF()) {
3085       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3086       SDValue FPConv =
3087           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3088       return FPConv;
3089     }
3090 
3091     // Consider other scalar<->scalar casts as legal if the types are legal.
3092     // Otherwise expand them.
3093     if (!VT.isVector() && !Op0VT.isVector()) {
3094       if (isTypeLegal(VT) && isTypeLegal(Op0VT))
3095         return Op;
3096       return SDValue();
3097     }
3098 
3099     assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
3100            "Unexpected types");
3101 
3102     if (VT.isFixedLengthVector()) {
3103       // We can handle fixed length vector bitcasts with a simple replacement
3104       // in isel.
3105       if (Op0VT.isFixedLengthVector())
3106         return Op;
      // When bitcasting from scalar to fixed-length vector, insert the scalar
      // into a one-element vector of the scalar's type, then bitcast that
      // vector to the result type.
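      // E.g. an i64 -> v4i16 bitcast becomes an insert into v1i64 followed by
      // a v1i64 -> v4i16 bitcast.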
3110       if (!Op0VT.isVector()) {
3111         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3112         if (!isTypeLegal(BVT))
3113           return SDValue();
3114         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3115                                               DAG.getUNDEF(BVT), Op0,
3116                                               DAG.getConstant(0, DL, XLenVT)));
3117       }
3118       return SDValue();
3119     }
3120     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3121     // thus: bitcast the vector to a one-element vector type whose element type
3122     // is the same as the result type, and extract the first element.
3123     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3124       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3125       if (!isTypeLegal(BVT))
3126         return SDValue();
3127       SDValue BVec = DAG.getBitcast(BVT, Op0);
3128       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3129                          DAG.getConstant(0, DL, XLenVT));
3130     }
3131     return SDValue();
3132   }
3133   case ISD::INTRINSIC_WO_CHAIN:
3134     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3135   case ISD::INTRINSIC_W_CHAIN:
3136     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3137   case ISD::INTRINSIC_VOID:
3138     return LowerINTRINSIC_VOID(Op, DAG);
3139   case ISD::BSWAP:
3140   case ISD::BITREVERSE: {
3141     MVT VT = Op.getSimpleValueType();
3142     SDLoc DL(Op);
3143     if (Subtarget.hasStdExtZbp()) {
      // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
3145       // Start with the maximum immediate value which is the bitwidth - 1.
3146       unsigned Imm = VT.getSizeInBits() - 1;
3147       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
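      // E.g. for XLen=64 this gives grevi 63 for BITREVERSE and grevi 56 for
      // BSWAP (the rev8 encoding).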
3148       if (Op.getOpcode() == ISD::BSWAP)
3149         Imm &= ~0x7U;
3150       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3151                          DAG.getConstant(Imm, DL, VT));
3152     }
3153     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3154     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3155     // Expand bitreverse to a bswap(rev8) followed by brev8.
3156     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3157     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3158     // as brev8 by an isel pattern.
3159     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3160                        DAG.getConstant(7, DL, VT));
3161   }
3162   case ISD::FSHL:
3163   case ISD::FSHR: {
3164     MVT VT = Op.getSimpleValueType();
3165     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3166     SDLoc DL(Op);
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly to prevent
    // accidentally setting the extra bit.
3170     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3171     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3172                                 DAG.getConstant(ShAmtWidth, DL, VT));
    // fshl and fshr concatenate their operands in the same order. fsl and fsr
    // instructions use different orders. For a shift of zero, fshl returns its
    // first operand while fshr returns its second; fsl and fsr both return
    // rs1, so the ISD nodes need different operand orders. The shift amount is
    // in rs2.
3178     SDValue Op0 = Op.getOperand(0);
3179     SDValue Op1 = Op.getOperand(1);
3180     unsigned Opc = RISCVISD::FSL;
3181     if (Op.getOpcode() == ISD::FSHR) {
3182       std::swap(Op0, Op1);
3183       Opc = RISCVISD::FSR;
3184     }
3185     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3186   }
3187   case ISD::TRUNCATE:
    // Only custom-lower vector truncates.
3189     if (!Op.getSimpleValueType().isVector())
3190       return Op;
3191     return lowerVectorTruncLike(Op, DAG);
3192   case ISD::ANY_EXTEND:
3193   case ISD::ZERO_EXTEND:
3194     if (Op.getOperand(0).getValueType().isVector() &&
3195         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3196       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3197     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3198   case ISD::SIGN_EXTEND:
3199     if (Op.getOperand(0).getValueType().isVector() &&
3200         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3201       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3202     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3203   case ISD::SPLAT_VECTOR_PARTS:
3204     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3205   case ISD::INSERT_VECTOR_ELT:
3206     return lowerINSERT_VECTOR_ELT(Op, DAG);
3207   case ISD::EXTRACT_VECTOR_ELT:
3208     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3209   case ISD::VSCALE: {
3210     MVT VT = Op.getSimpleValueType();
3211     SDLoc DL(Op);
3212     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64-bit known
    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes, so we
    // calculate vscale as VLENB / 8.
3216     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3217     if (Subtarget.getRealMinVLen() < RISCV::RVVBitsPerBlock)
3218       report_fatal_error("Support for VLEN==32 is incomplete.");
3219     // We assume VLENB is a multiple of 8. We manually choose the best shift
3220     // here because SimplifyDemandedBits isn't always able to simplify it.
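    // E.g. vscale * 4 becomes VLENB >> (3 - 2) and vscale * 16 becomes
    // VLENB << (4 - 3).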
3221     uint64_t Val = Op.getConstantOperandVal(0);
3222     if (isPowerOf2_64(Val)) {
3223       uint64_t Log2 = Log2_64(Val);
3224       if (Log2 < 3)
3225         return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3226                            DAG.getConstant(3 - Log2, DL, VT));
3227       if (Log2 > 3)
3228         return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3229                            DAG.getConstant(Log2 - 3, DL, VT));
3230       return VLENB;
3231     }
3232     // If the multiplier is a multiple of 8, scale it down to avoid needing
3233     // to shift the VLENB value.
3234     if ((Val % 8) == 0)
3235       return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3236                          DAG.getConstant(Val / 8, DL, VT));
3237 
3238     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3239                                  DAG.getConstant(3, DL, VT));
3240     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3241   }
3242   case ISD::FPOWI: {
    // Custom promote f16 powi with illegal i32 integer type on RV64. Once
    // promoted, this will be legalized into a libcall by LegalizeIntegerTypes.
3245     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3246         Op.getOperand(1).getValueType() == MVT::i32) {
3247       SDLoc DL(Op);
3248       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3249       SDValue Powi =
3250           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3251       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3252                          DAG.getIntPtrConstant(0, DL));
3253     }
3254     return SDValue();
3255   }
3256   case ISD::FP_EXTEND:
3257   case ISD::FP_ROUND:
3258     if (!Op.getValueType().isVector())
3259       return Op;
3260     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3261   case ISD::FP_TO_SINT:
3262   case ISD::FP_TO_UINT:
3263   case ISD::SINT_TO_FP:
3264   case ISD::UINT_TO_FP: {
3265     // RVV can only do fp<->int conversions to types half/double the size as
3266     // the source. We custom-lower any conversions that do two hops into
3267     // sequences.
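    // E.g. i8 -> f32 is lowered as i8 -> i32 (extend) followed by i32 -> f32,
    // and f64 -> i8 as f64 -> i32 followed by i32 -> i8 (truncate).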
3268     MVT VT = Op.getSimpleValueType();
3269     if (!VT.isVector())
3270       return Op;
3271     SDLoc DL(Op);
3272     SDValue Src = Op.getOperand(0);
3273     MVT EltVT = VT.getVectorElementType();
3274     MVT SrcVT = Src.getSimpleValueType();
3275     MVT SrcEltVT = SrcVT.getVectorElementType();
3276     unsigned EltSize = EltVT.getSizeInBits();
3277     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3278     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3279            "Unexpected vector element types");
3280 
3281     bool IsInt2FP = SrcEltVT.isInteger();
3282     // Widening conversions
3283     if (EltSize > (2 * SrcEltSize)) {
3284       if (IsInt2FP) {
3285         // Do a regular integer sign/zero extension then convert to float.
3286         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3287                                       VT.getVectorElementCount());
3288         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3289                                  ? ISD::ZERO_EXTEND
3290                                  : ISD::SIGN_EXTEND;
3291         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3292         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3293       }
3294       // FP2Int
3295       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3296       // Do one doubling fp_extend then complete the operation by converting
3297       // to int.
3298       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3299       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3300       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3301     }
3302 
3303     // Narrowing conversions
3304     if (SrcEltSize > (2 * EltSize)) {
3305       if (IsInt2FP) {
3306         // One narrowing int_to_fp, then an fp_round.
3307         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3308         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3309         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3310         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3311       }
3312       // FP2Int
3313       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3314       // representable by the integer, the result is poison.
3315       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3316                                     VT.getVectorElementCount());
3317       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3318       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3319     }
3320 
    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions and halving/doubling ones.
3323     if (!VT.isFixedLengthVector())
3324       return Op;
3325 
3326     // For fixed-length vectors we lower to a custom "VL" node.
3327     unsigned RVVOpc = 0;
3328     switch (Op.getOpcode()) {
3329     default:
3330       llvm_unreachable("Impossible opcode");
3331     case ISD::FP_TO_SINT:
3332       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3333       break;
3334     case ISD::FP_TO_UINT:
3335       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3336       break;
3337     case ISD::SINT_TO_FP:
3338       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3339       break;
3340     case ISD::UINT_TO_FP:
3341       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3342       break;
3343     }
3344 
3345     MVT ContainerVT, SrcContainerVT;
3346     // Derive the reference container type from the larger vector type.
3347     if (SrcEltSize > EltSize) {
3348       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3349       ContainerVT =
3350           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3351     } else {
3352       ContainerVT = getContainerForFixedLengthVector(VT);
3353       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3354     }
3355 
3356     SDValue Mask, VL;
3357     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3358 
3359     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3360     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3361     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3362   }
3363   case ISD::FP_TO_SINT_SAT:
3364   case ISD::FP_TO_UINT_SAT:
3365     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3366   case ISD::FTRUNC:
3367   case ISD::FCEIL:
3368   case ISD::FFLOOR:
3369     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3370   case ISD::FROUND:
3371     return lowerFROUND(Op, DAG);
3372   case ISD::VECREDUCE_ADD:
3373   case ISD::VECREDUCE_UMAX:
3374   case ISD::VECREDUCE_SMAX:
3375   case ISD::VECREDUCE_UMIN:
3376   case ISD::VECREDUCE_SMIN:
3377     return lowerVECREDUCE(Op, DAG);
3378   case ISD::VECREDUCE_AND:
3379   case ISD::VECREDUCE_OR:
3380   case ISD::VECREDUCE_XOR:
3381     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3382       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3383     return lowerVECREDUCE(Op, DAG);
3384   case ISD::VECREDUCE_FADD:
3385   case ISD::VECREDUCE_SEQ_FADD:
3386   case ISD::VECREDUCE_FMIN:
3387   case ISD::VECREDUCE_FMAX:
3388     return lowerFPVECREDUCE(Op, DAG);
3389   case ISD::VP_REDUCE_ADD:
3390   case ISD::VP_REDUCE_UMAX:
3391   case ISD::VP_REDUCE_SMAX:
3392   case ISD::VP_REDUCE_UMIN:
3393   case ISD::VP_REDUCE_SMIN:
3394   case ISD::VP_REDUCE_FADD:
3395   case ISD::VP_REDUCE_SEQ_FADD:
3396   case ISD::VP_REDUCE_FMIN:
3397   case ISD::VP_REDUCE_FMAX:
3398     return lowerVPREDUCE(Op, DAG);
3399   case ISD::VP_REDUCE_AND:
3400   case ISD::VP_REDUCE_OR:
3401   case ISD::VP_REDUCE_XOR:
3402     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3403       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3404     return lowerVPREDUCE(Op, DAG);
3405   case ISD::INSERT_SUBVECTOR:
3406     return lowerINSERT_SUBVECTOR(Op, DAG);
3407   case ISD::EXTRACT_SUBVECTOR:
3408     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3409   case ISD::STEP_VECTOR:
3410     return lowerSTEP_VECTOR(Op, DAG);
3411   case ISD::VECTOR_REVERSE:
3412     return lowerVECTOR_REVERSE(Op, DAG);
3413   case ISD::VECTOR_SPLICE:
3414     return lowerVECTOR_SPLICE(Op, DAG);
3415   case ISD::BUILD_VECTOR:
3416     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3417   case ISD::SPLAT_VECTOR:
3418     if (Op.getValueType().getVectorElementType() == MVT::i1)
3419       return lowerVectorMaskSplat(Op, DAG);
3420     return SDValue();
3421   case ISD::VECTOR_SHUFFLE:
3422     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3423   case ISD::CONCAT_VECTORS: {
3424     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3425     // better than going through the stack, as the default expansion does.
3426     SDLoc DL(Op);
3427     MVT VT = Op.getSimpleValueType();
3428     unsigned NumOpElts =
3429         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3430     SDValue Vec = DAG.getUNDEF(VT);
3431     for (const auto &OpIdx : enumerate(Op->ops())) {
3432       SDValue SubVec = OpIdx.value();
3433       // Don't insert undef subvectors.
3434       if (SubVec.isUndef())
3435         continue;
3436       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3437                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3438     }
3439     return Vec;
3440   }
3441   case ISD::LOAD:
3442     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3443       return V;
3444     if (Op.getValueType().isFixedLengthVector())
3445       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3446     return Op;
3447   case ISD::STORE:
3448     if (auto V = expandUnalignedRVVStore(Op, DAG))
3449       return V;
3450     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3451       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3452     return Op;
3453   case ISD::MLOAD:
3454   case ISD::VP_LOAD:
3455     return lowerMaskedLoad(Op, DAG);
3456   case ISD::MSTORE:
3457   case ISD::VP_STORE:
3458     return lowerMaskedStore(Op, DAG);
3459   case ISD::SETCC:
3460     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3461   case ISD::ADD:
3462     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3463   case ISD::SUB:
3464     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3465   case ISD::MUL:
3466     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3467   case ISD::MULHS:
3468     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3469   case ISD::MULHU:
3470     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3471   case ISD::AND:
3472     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3473                                               RISCVISD::AND_VL);
3474   case ISD::OR:
3475     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3476                                               RISCVISD::OR_VL);
3477   case ISD::XOR:
3478     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3479                                               RISCVISD::XOR_VL);
3480   case ISD::SDIV:
3481     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3482   case ISD::SREM:
3483     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3484   case ISD::UDIV:
3485     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3486   case ISD::UREM:
3487     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3488   case ISD::SHL:
3489   case ISD::SRA:
3490   case ISD::SRL:
3491     if (Op.getSimpleValueType().isFixedLengthVector())
3492       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3493     // This can be called for an i32 shift amount that needs to be promoted.
3494     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3495            "Unexpected custom legalisation");
3496     return SDValue();
3497   case ISD::SADDSAT:
3498     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3499   case ISD::UADDSAT:
3500     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3501   case ISD::SSUBSAT:
3502     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3503   case ISD::USUBSAT:
3504     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3505   case ISD::FADD:
3506     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3507   case ISD::FSUB:
3508     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3509   case ISD::FMUL:
3510     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3511   case ISD::FDIV:
3512     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3513   case ISD::FNEG:
3514     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3515   case ISD::FABS:
3516     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3517   case ISD::FSQRT:
3518     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3519   case ISD::FMA:
3520     return lowerToScalableOp(Op, DAG, RISCVISD::VFMADD_VL);
3521   case ISD::SMIN:
3522     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3523   case ISD::SMAX:
3524     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3525   case ISD::UMIN:
3526     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3527   case ISD::UMAX:
3528     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3529   case ISD::FMINNUM:
3530     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3531   case ISD::FMAXNUM:
3532     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3533   case ISD::ABS:
3534     return lowerABS(Op, DAG);
3535   case ISD::CTLZ_ZERO_UNDEF:
3536   case ISD::CTTZ_ZERO_UNDEF:
3537     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3538   case ISD::VSELECT:
3539     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3540   case ISD::FCOPYSIGN:
3541     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3542   case ISD::MGATHER:
3543   case ISD::VP_GATHER:
3544     return lowerMaskedGather(Op, DAG);
3545   case ISD::MSCATTER:
3546   case ISD::VP_SCATTER:
3547     return lowerMaskedScatter(Op, DAG);
3548   case ISD::FLT_ROUNDS_:
3549     return lowerGET_ROUNDING(Op, DAG);
3550   case ISD::SET_ROUNDING:
3551     return lowerSET_ROUNDING(Op, DAG);
3552   case ISD::EH_DWARF_CFA:
3553     return lowerEH_DWARF_CFA(Op, DAG);
3554   case ISD::VP_SELECT:
3555     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3556   case ISD::VP_MERGE:
3557     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3558   case ISD::VP_ADD:
3559     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3560   case ISD::VP_SUB:
3561     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3562   case ISD::VP_MUL:
3563     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3564   case ISD::VP_SDIV:
3565     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3566   case ISD::VP_UDIV:
3567     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3568   case ISD::VP_SREM:
3569     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3570   case ISD::VP_UREM:
3571     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3572   case ISD::VP_AND:
3573     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3574   case ISD::VP_OR:
3575     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3576   case ISD::VP_XOR:
3577     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3578   case ISD::VP_ASHR:
3579     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3580   case ISD::VP_LSHR:
3581     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3582   case ISD::VP_SHL:
3583     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3584   case ISD::VP_FADD:
3585     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3586   case ISD::VP_FSUB:
3587     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3588   case ISD::VP_FMUL:
3589     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3590   case ISD::VP_FDIV:
3591     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3592   case ISD::VP_FNEG:
3593     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3594   case ISD::VP_FMA:
3595     return lowerVPOp(Op, DAG, RISCVISD::VFMADD_VL);
3596   case ISD::VP_SIGN_EXTEND:
3597   case ISD::VP_ZERO_EXTEND:
3598     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3599       return lowerVPExtMaskOp(Op, DAG);
3600     return lowerVPOp(Op, DAG,
3601                      Op.getOpcode() == ISD::VP_SIGN_EXTEND
3602                          ? RISCVISD::VSEXT_VL
3603                          : RISCVISD::VZEXT_VL);
3604   case ISD::VP_TRUNCATE:
3605     return lowerVectorTruncLike(Op, DAG);
3606   case ISD::VP_FP_EXTEND:
3607   case ISD::VP_FP_ROUND:
3608     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3609   case ISD::VP_FPTOSI:
3610     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3611   case ISD::VP_FPTOUI:
3612     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3613   case ISD::VP_SITOFP:
3614     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3615   case ISD::VP_UITOFP:
3616     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3617   case ISD::VP_SETCC:
3618     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3619       return lowerVPSetCCMaskOp(Op, DAG);
3620     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3621   }
3622 }
3623 
3624 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3625                              SelectionDAG &DAG, unsigned Flags) {
3626   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3627 }
3628 
3629 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3630                              SelectionDAG &DAG, unsigned Flags) {
3631   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3632                                    Flags);
3633 }
3634 
3635 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3636                              SelectionDAG &DAG, unsigned Flags) {
3637   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3638                                    N->getOffset(), Flags);
3639 }
3640 
3641 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3642                              SelectionDAG &DAG, unsigned Flags) {
3643   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3644 }
3645 
3646 template <class NodeTy>
3647 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3648                                      bool IsLocal) const {
3649   SDLoc DL(N);
3650   EVT Ty = getPointerTy(DAG.getDataLayout());
3651 
3652   if (isPositionIndependent()) {
3653     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3654     if (IsLocal)
3655       // Use PC-relative addressing to access the symbol. This generates the
3656       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3657       // %pcrel_lo(auipc)).
3658       return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
3659 
3660     // Use PC-relative addressing to access the GOT for this symbol, then load
3661     // the address from the GOT. This generates the pattern (PseudoLA sym),
3662     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3663     MachineFunction &MF = DAG.getMachineFunction();
3664     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3665         MachinePointerInfo::getGOT(MF),
3666         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3667             MachineMemOperand::MOInvariant,
3668         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3669     SDValue Load =
3670         DAG.getMemIntrinsicNode(RISCVISD::LA, DL, DAG.getVTList(Ty, MVT::Other),
3671                                 {DAG.getEntryNode(), Addr}, Ty, MemOp);
3672     return Load;
3673   }
3674 
3675   switch (getTargetMachine().getCodeModel()) {
3676   default:
3677     report_fatal_error("Unsupported code model for lowering");
3678   case CodeModel::Small: {
3679     // Generate a sequence for accessing addresses within the first 2 GiB of
3680     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3681     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3682     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3683     SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
3684     return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNHi, AddrLo);
3685   }
3686   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range of
    // the address space. This generates the pattern (PseudoLLA sym), which
    // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3690     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3691     return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
3692   }
3693   }
3694 }
3695 
3696 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3697                                                 SelectionDAG &DAG) const {
3698   SDLoc DL(Op);
3699   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3700   assert(N->getOffset() == 0 && "unexpected offset in global node");
3701   return getAddr(N, DAG, N->getGlobal()->isDSOLocal());
3702 }
3703 
3704 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3705                                                SelectionDAG &DAG) const {
3706   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3707 
3708   return getAddr(N, DAG);
3709 }
3710 
3711 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3712                                                SelectionDAG &DAG) const {
3713   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3714 
3715   return getAddr(N, DAG);
3716 }
3717 
3718 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3719                                             SelectionDAG &DAG) const {
3720   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3721 
3722   return getAddr(N, DAG);
3723 }
3724 
3725 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3726                                               SelectionDAG &DAG,
3727                                               bool UseGOT) const {
3728   SDLoc DL(N);
3729   EVT Ty = getPointerTy(DAG.getDataLayout());
3730   const GlobalValue *GV = N->getGlobal();
3731   MVT XLenVT = Subtarget.getXLenVT();
3732 
3733   if (UseGOT) {
3734     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3735     // load the address from the GOT and add the thread pointer. This generates
3736     // the pattern (PseudoLA_TLS_IE sym), which expands to
3737     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3738     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3739     MachineFunction &MF = DAG.getMachineFunction();
3740     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3741         MachinePointerInfo::getGOT(MF),
3742         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3743             MachineMemOperand::MOInvariant,
3744         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3745     SDValue Load = DAG.getMemIntrinsicNode(
3746         RISCVISD::LA_TLS_IE, DL, DAG.getVTList(Ty, MVT::Other),
3747         {DAG.getEntryNode(), Addr}, Ty, MemOp);
3748 
3749     // Add the thread pointer.
3750     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3751     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3752   }
3753 
3754   // Generate a sequence for accessing the address relative to the thread
3755   // pointer, with the appropriate adjustment for the thread pointer offset.
3756   // This generates the pattern
3757   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3758   SDValue AddrHi =
3759       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3760   SDValue AddrAdd =
3761       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3762   SDValue AddrLo =
3763       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3764 
3765   SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
3766   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3767   SDValue MNAdd =
3768       DAG.getNode(RISCVISD::ADD_TPREL, DL, Ty, MNHi, TPReg, AddrAdd);
3769   return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNAdd, AddrLo);
3770 }
3771 
3772 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3773                                                SelectionDAG &DAG) const {
3774   SDLoc DL(N);
3775   EVT Ty = getPointerTy(DAG.getDataLayout());
3776   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3777   const GlobalValue *GV = N->getGlobal();
3778 
3779   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3780   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3781   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3782   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3783   SDValue Load = DAG.getNode(RISCVISD::LA_TLS_GD, DL, Ty, Addr);
3784 
  // Prepare the argument list for the call.
3786   ArgListTy Args;
3787   ArgListEntry Entry;
3788   Entry.Node = Load;
3789   Entry.Ty = CallTy;
3790   Args.push_back(Entry);
3791 
3792   // Setup call to __tls_get_addr.
3793   TargetLowering::CallLoweringInfo CLI(DAG);
3794   CLI.setDebugLoc(DL)
3795       .setChain(DAG.getEntryNode())
3796       .setLibCallee(CallingConv::C, CallTy,
3797                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3798                     std::move(Args));
3799 
3800   return LowerCallTo(CLI).first;
3801 }
3802 
3803 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3804                                                    SelectionDAG &DAG) const {
3805   SDLoc DL(Op);
3806   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3807   assert(N->getOffset() == 0 && "unexpected offset in global node");
3808 
3809   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3810 
3811   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3812       CallingConv::GHC)
3813     report_fatal_error("In GHC calling convention TLS is not supported");
3814 
3815   SDValue Addr;
3816   switch (Model) {
3817   case TLSModel::LocalExec:
3818     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3819     break;
3820   case TLSModel::InitialExec:
3821     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3822     break;
3823   case TLSModel::LocalDynamic:
3824   case TLSModel::GeneralDynamic:
3825     Addr = getDynamicTLSAddr(N, DAG);
3826     break;
3827   }
3828 
3829   return Addr;
3830 }
3831 
3832 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3833   SDValue CondV = Op.getOperand(0);
3834   SDValue TrueV = Op.getOperand(1);
3835   SDValue FalseV = Op.getOperand(2);
3836   SDLoc DL(Op);
3837   MVT VT = Op.getSimpleValueType();
3838   MVT XLenVT = Subtarget.getXLenVT();
3839 
3840   // Lower vector SELECTs to VSELECTs by splatting the condition.
3841   if (VT.isVector()) {
3842     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3843     SDValue CondSplat = VT.isScalableVector()
3844                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3845                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3846     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3847   }
3848 
3849   // If the result type is XLenVT and CondV is the output of a SETCC node
3850   // which also operated on XLenVT inputs, then merge the SETCC node into the
3851   // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions, i.e.:
3853   // (select (setcc lhs, rhs, cc), truev, falsev)
3854   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3855   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3856       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3857     SDValue LHS = CondV.getOperand(0);
3858     SDValue RHS = CondV.getOperand(1);
3859     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3860     ISD::CondCode CCVal = CC->get();
3861 
    // Special case for a select of 2 constants that have a difference of 1.
3863     // Normally this is done by DAGCombine, but if the select is introduced by
3864     // type legalization or op legalization, we miss it. Restricting to SETLT
3865     // case for now because that is what signed saturating add/sub need.
3866     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3867     // but we would probably want to swap the true/false values if the condition
3868     // is SETGE/SETLE to avoid an XORI.
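    // For example, (select (setlt a, b), 4, 3) is lowered to
    // (add (setlt a, b), 3), avoiding the select entirely.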
3869     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3870         CCVal == ISD::SETLT) {
3871       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3872       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3873       if (TrueVal - 1 == FalseVal)
3874         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3875       if (TrueVal + 1 == FalseVal)
3876         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3877     }
3878 
3879     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3880 
3881     SDValue TargetCC = DAG.getCondCode(CCVal);
3882     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3883     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3884   }
3885 
3886   // Otherwise:
3887   // (select condv, truev, falsev)
3888   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3889   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3890   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3891 
3892   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3893 
3894   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3895 }
3896 
3897 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3898   SDValue CondV = Op.getOperand(1);
3899   SDLoc DL(Op);
3900   MVT XLenVT = Subtarget.getXLenVT();
3901 
3902   if (CondV.getOpcode() == ISD::SETCC &&
3903       CondV.getOperand(0).getValueType() == XLenVT) {
3904     SDValue LHS = CondV.getOperand(0);
3905     SDValue RHS = CondV.getOperand(1);
3906     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3907 
3908     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3909 
3910     SDValue TargetCC = DAG.getCondCode(CCVal);
3911     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3912                        LHS, RHS, TargetCC, Op.getOperand(2));
3913   }
3914 
3915   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3916                      CondV, DAG.getConstant(0, DL, XLenVT),
3917                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3918 }
3919 
3920 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3921   MachineFunction &MF = DAG.getMachineFunction();
3922   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3923 
3924   SDLoc DL(Op);
3925   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3926                                  getPointerTy(MF.getDataLayout()));
3927 
3928   // vastart just stores the address of the VarArgsFrameIndex slot into the
3929   // memory location argument.
3930   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3931   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3932                       MachinePointerInfo(SV));
3933 }
3934 
3935 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3936                                             SelectionDAG &DAG) const {
3937   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3938   MachineFunction &MF = DAG.getMachineFunction();
3939   MachineFrameInfo &MFI = MF.getFrameInfo();
3940   MFI.setFrameAddressIsTaken(true);
3941   Register FrameReg = RI.getFrameRegister(MF);
3942   int XLenInBytes = Subtarget.getXLen() / 8;
3943 
3944   EVT VT = Op.getValueType();
3945   SDLoc DL(Op);
3946   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3947   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
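  // Walk up the frame chain: the caller's frame pointer is assumed to be
  // spilled at -2*XLenInBytes from the current frame pointer, just below the
  // return address slot.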
3948   while (Depth--) {
3949     int Offset = -(XLenInBytes * 2);
3950     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3951                               DAG.getIntPtrConstant(Offset, DL));
3952     FrameAddr =
3953         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3954   }
3955   return FrameAddr;
3956 }
3957 
3958 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3959                                              SelectionDAG &DAG) const {
3960   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3961   MachineFunction &MF = DAG.getMachineFunction();
3962   MachineFrameInfo &MFI = MF.getFrameInfo();
3963   MFI.setReturnAddressIsTaken(true);
3964   MVT XLenVT = Subtarget.getXLenVT();
3965   int XLenInBytes = Subtarget.getXLen() / 8;
3966 
3967   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3968     return SDValue();
3969 
3970   EVT VT = Op.getValueType();
3971   SDLoc DL(Op);
3972   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3973   if (Depth) {
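    // The return address of a parent frame is assumed to be spilled at
    // -XLenInBytes from that frame's frame pointer.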
3974     int Off = -XLenInBytes;
3975     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3976     SDValue Offset = DAG.getConstant(Off, DL, VT);
3977     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3978                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3979                        MachinePointerInfo());
3980   }
3981 
3982   // Return the value of the return address register, marking it an implicit
3983   // live-in.
3984   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3985   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3986 }
3987 
3988 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3989                                                  SelectionDAG &DAG) const {
3990   SDLoc DL(Op);
3991   SDValue Lo = Op.getOperand(0);
3992   SDValue Hi = Op.getOperand(1);
3993   SDValue Shamt = Op.getOperand(2);
3994   EVT VT = Lo.getValueType();
3995 
3996   // if Shamt-XLEN < 0: // Shamt < XLEN
3997   //   Lo = Lo << Shamt
3998   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3999   // else:
4000   //   Lo = 0
4001   //   Hi = Lo << (Shamt-XLEN)
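  //
  // For example, with XLEN=32 and Shamt=40, Shamt-XLEN=8 is non-negative, so
  // the result is Lo = 0 and Hi = (original Lo) << 8.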
4002 
4003   SDValue Zero = DAG.getConstant(0, DL, VT);
4004   SDValue One = DAG.getConstant(1, DL, VT);
4005   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4006   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4007   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4008   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4009 
4010   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4011   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4012   SDValue ShiftRightLo =
4013       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4014   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4015   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4016   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4017 
4018   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4019 
4020   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4021   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4022 
4023   SDValue Parts[2] = {Lo, Hi};
4024   return DAG.getMergeValues(Parts, DL);
4025 }
4026 
4027 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4028                                                   bool IsSRA) const {
4029   SDLoc DL(Op);
4030   SDValue Lo = Op.getOperand(0);
4031   SDValue Hi = Op.getOperand(1);
4032   SDValue Shamt = Op.getOperand(2);
4033   EVT VT = Lo.getValueType();
4034 
4035   // SRA expansion:
4036   //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
4038   //     Hi = Hi >>s Shamt
4039   //   else:
4040   //     Lo = Hi >>s (Shamt-XLEN);
4041   //     Hi = Hi >>s (XLEN-1)
4042   //
4043   // SRL expansion:
4044   //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
4046   //     Hi = Hi >>u Shamt
4047   //   else:
4048   //     Lo = Hi >>u (Shamt-XLEN);
4049   //     Hi = 0;
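  //
  // For example, an SRL with XLEN=32 and Shamt=40 takes the else branch:
  // Lo = Hi >>u 8 and Hi = 0.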
4050 
4051   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4052 
4053   SDValue Zero = DAG.getConstant(0, DL, VT);
4054   SDValue One = DAG.getConstant(1, DL, VT);
4055   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4056   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4057   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4058   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4059 
4060   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4061   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4062   SDValue ShiftLeftHi =
4063       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4064   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4065   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4066   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4067   SDValue HiFalse =
4068       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4069 
4070   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4071 
4072   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4073   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4074 
4075   SDValue Parts[2] = {Lo, Hi};
4076   return DAG.getMergeValues(Parts, DL);
4077 }
4078 
4079 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4080 // legal equivalently-sized i8 type, so we can use that as a go-between.
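// For example, splatting an i1 value %x is lowered roughly as
// (setne (vXi8 splat (and %x, 1)), (vXi8 splat 0)).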
4081 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4082                                                   SelectionDAG &DAG) const {
4083   SDLoc DL(Op);
4084   MVT VT = Op.getSimpleValueType();
4085   SDValue SplatVal = Op.getOperand(0);
4086   // All-zeros or all-ones splats are handled specially.
4087   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4088     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4089     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4090   }
4091   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4092     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4093     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4094   }
4095   MVT XLenVT = Subtarget.getXLenVT();
4096   assert(SplatVal.getValueType() == XLenVT &&
4097          "Unexpected type for i1 splat value");
4098   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4099   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4100                          DAG.getConstant(1, DL, XLenVT));
4101   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4102   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4103   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4104 }
4105 
4106 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4107 // illegal (currently only vXi64 RV32).
4108 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4109 // them to VMV_V_X_VL.
4110 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4111                                                      SelectionDAG &DAG) const {
4112   SDLoc DL(Op);
4113   MVT VecVT = Op.getSimpleValueType();
4114   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4115          "Unexpected SPLAT_VECTOR_PARTS lowering");
4116 
4117   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4118   SDValue Lo = Op.getOperand(0);
4119   SDValue Hi = Op.getOperand(1);
4120 
4121   if (VecVT.isFixedLengthVector()) {
4122     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4123     SDLoc DL(Op);
4124     SDValue Mask, VL;
4125     std::tie(Mask, VL) =
4126         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4127 
4128     SDValue Res =
4129         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4130     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4131   }
4132 
4133   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4134     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4135     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi is all copies of Lo's sign bit (i.e. the pair encodes a
    // sign-extended 32-bit value), lower this as a custom node in order to
    // try to match RVV vector/scalar instructions.
4138     if ((LoC >> 31) == HiC)
4139       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4140                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4141   }
4142 
  // Detect cases where Hi is (SRA Lo, 31), which means Hi is Lo sign-extended.
4144   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4145       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4146       Hi.getConstantOperandVal(1) == 31)
4147     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4148                        DAG.getRegister(RISCV::X0, MVT::i32));
4149 
4150   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4151   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4152                      DAG.getUNDEF(VecVT), Lo, Hi,
4153                      DAG.getRegister(RISCV::X0, MVT::i32));
4154 }
4155 
4156 // Custom-lower extensions from mask vectors by using a vselect either with 1
4157 // for zero/any-extension or -1 for sign-extension:
4158 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4159 // Note that any-extension is lowered identically to zero-extension.
4160 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4161                                                 int64_t ExtTrueVal) const {
4162   SDLoc DL(Op);
4163   MVT VecVT = Op.getSimpleValueType();
4164   SDValue Src = Op.getOperand(0);
4165   // Only custom-lower extensions from mask types
4166   assert(Src.getValueType().isVector() &&
4167          Src.getValueType().getVectorElementType() == MVT::i1);
4168 
4169   if (VecVT.isScalableVector()) {
4170     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4171     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4172     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4173   }
4174 
4175   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4176   MVT I1ContainerVT =
4177       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4178 
4179   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4180 
4181   SDValue Mask, VL;
4182   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4183 
4184   MVT XLenVT = Subtarget.getXLenVT();
4185   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4186   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4187 
4188   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4189                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4190   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4191                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4192   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4193                                SplatTrueVal, SplatZero, VL);
4194 
4195   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4196 }
4197 
4198 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4199     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4200   MVT ExtVT = Op.getSimpleValueType();
4201   // Only custom-lower extensions from fixed-length vector types.
4202   if (!ExtVT.isFixedLengthVector())
4203     return Op;
4204   MVT VT = Op.getOperand(0).getSimpleValueType();
4205   // Grab the canonical container type for the extended type. Infer the smaller
4206   // type from that to ensure the same number of vector elements, as we know
4207   // the LMUL will be sufficient to hold the smaller type.
4208   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4209   // Get the extended container type manually to ensure the same number of
4210   // vector elements between source and dest.
4211   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4212                                      ContainerExtVT.getVectorElementCount());
4213 
4214   SDValue Op1 =
4215       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4216 
4217   SDLoc DL(Op);
4218   SDValue Mask, VL;
4219   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4220 
4221   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4222 
4223   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4224 }
4225 
4226 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4227 // setcc operation:
4228 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4229 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4230                                                       SelectionDAG &DAG) const {
4231   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4232   SDLoc DL(Op);
4233   EVT MaskVT = Op.getValueType();
4234   // Only expect to custom-lower truncations to mask types
4235   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4236          "Unexpected type for vector mask lowering");
4237   SDValue Src = Op.getOperand(0);
4238   MVT VecVT = Src.getSimpleValueType();
4239   SDValue Mask, VL;
4240   if (IsVPTrunc) {
4241     Mask = Op.getOperand(1);
4242     VL = Op.getOperand(2);
4243   }
4244   // If this is a fixed vector, we need to convert it to a scalable vector.
4245   MVT ContainerVT = VecVT;
4246 
4247   if (VecVT.isFixedLengthVector()) {
4248     ContainerVT = getContainerForFixedLengthVector(VecVT);
4249     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4250     if (IsVPTrunc) {
4251       MVT MaskContainerVT =
4252           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4253       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4254     }
4255   }
4256 
4257   if (!IsVPTrunc) {
4258     std::tie(Mask, VL) =
4259         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4260   }
4261 
4262   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4263   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4264 
4265   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4266                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4267   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4268                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4269 
4270   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4271   SDValue Trunc =
4272       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4273   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4274                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4275   if (MaskVT.isFixedLengthVector())
4276     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4277   return Trunc;
4278 }
4279 
4280 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4281                                                   SelectionDAG &DAG) const {
4282   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4283   SDLoc DL(Op);
4284 
4285   MVT VT = Op.getSimpleValueType();
4286   // Only custom-lower vector truncates
4287   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4288 
4289   // Truncates to mask types are handled differently
4290   if (VT.getVectorElementType() == MVT::i1)
4291     return lowerVectorMaskTruncLike(Op, DAG);
4292 
4293   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4294   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4295   // truncate by one power of two at a time.
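  // For example, a v4i64 -> v4i8 truncate is emitted as three narrowing
  // steps: i64->i32, i32->i16, then i16->i8.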
4296   MVT DstEltVT = VT.getVectorElementType();
4297 
4298   SDValue Src = Op.getOperand(0);
4299   MVT SrcVT = Src.getSimpleValueType();
4300   MVT SrcEltVT = SrcVT.getVectorElementType();
4301 
4302   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4303          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4304          "Unexpected vector truncate lowering");
4305 
4306   MVT ContainerVT = SrcVT;
4307   SDValue Mask, VL;
4308   if (IsVPTrunc) {
4309     Mask = Op.getOperand(1);
4310     VL = Op.getOperand(2);
4311   }
4312   if (SrcVT.isFixedLengthVector()) {
4313     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4314     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4315     if (IsVPTrunc) {
4316       MVT MaskVT = getMaskTypeFor(ContainerVT);
4317       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4318     }
4319   }
4320 
4321   SDValue Result = Src;
4322   if (!IsVPTrunc) {
4323     std::tie(Mask, VL) =
4324         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4325   }
4326 
4327   LLVMContext &Context = *DAG.getContext();
4328   const ElementCount Count = ContainerVT.getVectorElementCount();
4329   do {
4330     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4331     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4332     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4333                          Mask, VL);
4334   } while (SrcEltVT != DstEltVT);
4335 
4336   if (SrcVT.isFixedLengthVector())
4337     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4338 
4339   return Result;
4340 }
4341 
4342 SDValue
4343 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
4344                                                     SelectionDAG &DAG) const {
4345   bool IsVP =
4346       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
4347   bool IsExtend =
4348       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
  // RVV can only truncate fp to types half the size of the source. We
  // custom-lower f64->f16 rounds via RVV's round-to-odd float
  // conversion instruction.
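  // For example, an nxv1f64 -> nxv1f16 round is emitted as a round-to-odd
  // narrowing to f32 (vfncvt.rod.f.f.w) followed by an ordinary f32 -> f16
  // narrowing (vfncvt.f.f.w).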
4352   SDLoc DL(Op);
4353   MVT VT = Op.getSimpleValueType();
4354 
4355   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4356 
4357   SDValue Src = Op.getOperand(0);
4358   MVT SrcVT = Src.getSimpleValueType();
4359 
4360   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
4361                                      SrcVT.getVectorElementType() != MVT::f16);
4362   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
4363                                      SrcVT.getVectorElementType() != MVT::f64);
4364 
4365   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
4366 
4367   // Prepare any fixed-length vector operands.
4368   MVT ContainerVT = VT;
4369   SDValue Mask, VL;
4370   if (IsVP) {
4371     Mask = Op.getOperand(1);
4372     VL = Op.getOperand(2);
4373   }
4374   if (VT.isFixedLengthVector()) {
4375     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4376     ContainerVT =
4377         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4378     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4379     if (IsVP) {
4380       MVT MaskVT = getMaskTypeFor(ContainerVT);
4381       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4382     }
4383   }
4384 
4385   if (!IsVP)
4386     std::tie(Mask, VL) =
4387         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4388 
4389   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
4390 
4391   if (IsDirectConv) {
4392     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
4393     if (VT.isFixedLengthVector())
4394       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4395     return Src;
4396   }
4397 
4398   unsigned InterConvOpc =
4399       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
4400 
4401   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4402   SDValue IntermediateConv =
4403       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
4404   SDValue Result =
4405       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
4406   if (VT.isFixedLengthVector())
4407     return convertFromScalableVector(VT, Result, DAG, Subtarget);
4408   return Result;
4409 }
4410 
4411 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4412 // first position of a vector, and that vector is slid up to the insert index.
4413 // By limiting the active vector length to index+1 and merging with the
4414 // original vector (with an undisturbed tail policy for elements >= VL), we
4415 // achieve the desired result of leaving all elements untouched except the one
4416 // at VL-1, which is replaced with the desired value.
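// For example, inserting a value at index 2 of a v4i32 vector is roughly:
//   vmv.s.x     v9, a0                   # value into element 0 of a temporary
//   vsetivli    zero, 3, e32, m1, tu, mu # VL = index + 1, tail undisturbed
//   vslideup.vi v8, v9, 2                # slide the value up into element 2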
4417 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4418                                                     SelectionDAG &DAG) const {
4419   SDLoc DL(Op);
4420   MVT VecVT = Op.getSimpleValueType();
4421   SDValue Vec = Op.getOperand(0);
4422   SDValue Val = Op.getOperand(1);
4423   SDValue Idx = Op.getOperand(2);
4424 
4425   if (VecVT.getVectorElementType() == MVT::i1) {
4426     // FIXME: For now we just promote to an i8 vector and insert into that,
4427     // but this is probably not optimal.
4428     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4429     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4430     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4431     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4432   }
4433 
4434   MVT ContainerVT = VecVT;
4435   // If the operand is a fixed-length vector, convert to a scalable one.
4436   if (VecVT.isFixedLengthVector()) {
4437     ContainerVT = getContainerForFixedLengthVector(VecVT);
4438     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4439   }
4440 
4441   MVT XLenVT = Subtarget.getXLenVT();
4442 
4443   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4444   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4445   // Even i64-element vectors on RV32 can be lowered without scalar
4446   // legalization if the most-significant 32 bits of the value are not affected
4447   // by the sign-extension of the lower 32 bits.
4448   // TODO: We could also catch sign extensions of a 32-bit value.
4449   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4450     const auto *CVal = cast<ConstantSDNode>(Val);
4451     if (isInt<32>(CVal->getSExtValue())) {
4452       IsLegalInsert = true;
4453       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4454     }
4455   }
4456 
4457   SDValue Mask, VL;
4458   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4459 
4460   SDValue ValInVec;
4461 
4462   if (IsLegalInsert) {
4463     unsigned Opc =
4464         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4465     if (isNullConstant(Idx)) {
4466       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4467       if (!VecVT.isFixedLengthVector())
4468         return Vec;
4469       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4470     }
4471     ValInVec =
4472         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4473   } else {
4474     // On RV32, i64-element vectors must be specially handled to place the
4475     // value at element 0, by using two vslide1up instructions in sequence on
4476     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4477     // this.
4478     SDValue One = DAG.getConstant(1, DL, XLenVT);
4479     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4480     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4481     MVT I32ContainerVT =
4482         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4483     SDValue I32Mask =
4484         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4485     // Limit the active VL to two.
4486     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
    // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4489     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4490                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4491     // First slide in the hi value, then the lo in underneath it.
4492     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4493                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4494                            I32Mask, InsertI64VL);
4495     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4496                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4497                            I32Mask, InsertI64VL);
4498     // Bitcast back to the right container type.
4499     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4500   }
4501 
4502   // Now that the value is in a vector, slide it into position.
4503   SDValue InsertVL =
4504       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4505   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4506                                 ValInVec, Idx, Mask, InsertVL);
4507   if (!VecVT.isFixedLengthVector())
4508     return Slideup;
4509   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4510 }
4511 
4512 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4513 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4514 // types this is done using VMV_X_S to allow us to glean information about the
4515 // sign bits of the result.
4516 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4517                                                      SelectionDAG &DAG) const {
4518   SDLoc DL(Op);
4519   SDValue Idx = Op.getOperand(1);
4520   SDValue Vec = Op.getOperand(0);
4521   EVT EltVT = Op.getValueType();
4522   MVT VecVT = Vec.getSimpleValueType();
4523   MVT XLenVT = Subtarget.getXLenVT();
4524 
4525   if (VecVT.getVectorElementType() == MVT::i1) {
4526     if (VecVT.isFixedLengthVector()) {
4527       unsigned NumElts = VecVT.getVectorNumElements();
4528       if (NumElts >= 8) {
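        // For example, with a v16i1 mask and ELEN >= 32, the extract becomes
        // a bitcast to i16, a shift right by Idx, and an and with 1.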
4529         MVT WideEltVT;
4530         unsigned WidenVecLen;
4531         SDValue ExtractElementIdx;
4532         SDValue ExtractBitIdx;
4533         unsigned MaxEEW = Subtarget.getELEN();
4534         MVT LargestEltVT = MVT::getIntegerVT(
4535             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4536         if (NumElts <= LargestEltVT.getSizeInBits()) {
4537           assert(isPowerOf2_32(NumElts) &&
4538                  "the number of elements should be power of 2");
4539           WideEltVT = MVT::getIntegerVT(NumElts);
4540           WidenVecLen = 1;
4541           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4542           ExtractBitIdx = Idx;
4543         } else {
4544           WideEltVT = LargestEltVT;
4545           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4546           // extract element index = index / element width
4547           ExtractElementIdx = DAG.getNode(
4548               ISD::SRL, DL, XLenVT, Idx,
4549               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4550           // mask bit index = index % element width
4551           ExtractBitIdx = DAG.getNode(
4552               ISD::AND, DL, XLenVT, Idx,
4553               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4554         }
4555         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4556         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4557         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4558                                          Vec, ExtractElementIdx);
4559         // Extract the bit from GPR.
4560         SDValue ShiftRight =
4561             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4562         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4563                            DAG.getConstant(1, DL, XLenVT));
4564       }
4565     }
4566     // Otherwise, promote to an i8 vector and extract from that.
4567     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4568     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4569     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4570   }
4571 
4572   // If this is a fixed vector, we need to convert it to a scalable vector.
4573   MVT ContainerVT = VecVT;
4574   if (VecVT.isFixedLengthVector()) {
4575     ContainerVT = getContainerForFixedLengthVector(VecVT);
4576     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4577   }
4578 
4579   // If the index is 0, the vector is already in the right position.
4580   if (!isNullConstant(Idx)) {
4581     // Use a VL of 1 to avoid processing more elements than we need.
4582     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4583     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4584     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4585                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4586   }
4587 
4588   if (!EltVT.isInteger()) {
4589     // Floating-point extracts are handled in TableGen.
4590     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4591                        DAG.getConstant(0, DL, XLenVT));
4592   }
4593 
4594   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4595   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4596 }
4597 
4598 // Some RVV intrinsics may claim that they want an integer operand to be
4599 // promoted or expanded.
4600 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4601                                            const RISCVSubtarget &Subtarget) {
4602   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4603           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4604          "Unexpected opcode");
4605 
4606   if (!Subtarget.hasVInstructions())
4607     return SDValue();
4608 
4609   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4610   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4611   SDLoc DL(Op);
4612 
4613   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4614       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4615   if (!II || !II->hasScalarOperand())
4616     return SDValue();
4617 
4618   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4619   assert(SplatOp < Op.getNumOperands());
4620 
4621   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4622   SDValue &ScalarOp = Operands[SplatOp];
4623   MVT OpVT = ScalarOp.getSimpleValueType();
4624   MVT XLenVT = Subtarget.getXLenVT();
4625 
  // If this isn't a scalar, or its type is XLenVT, we're done.
4627   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4628     return SDValue();
4629 
4630   // Simplest case is that the operand needs to be promoted to XLenVT.
4631   if (OpVT.bitsLT(XLenVT)) {
4632     // If the operand is a constant, sign extend to increase our chances
4633     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
4635     // FIXME: Should we ignore the upper bits in isel instead?
4636     unsigned ExtOpc =
4637         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4638     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4639     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4640   }
4641 
4642   // Use the previous operand to get the vXi64 VT. The result might be a mask
4643   // VT for compares. Using the previous operand assumes that the previous
4644   // operand will never have a smaller element size than a scalar operand and
4645   // that a widening operation never uses SEW=64.
4646   // NOTE: If this fails the below assert, we can probably just find the
4647   // element count from any operand or result and use it to construct the VT.
4648   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4649   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4650 
4651   // The more complex case is when the scalar is larger than XLenVT.
4652   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4653          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4654 
4655   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4656   // instruction to sign-extend since SEW>XLEN.
4657   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4658     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4659     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4660   }
4661 
4662   switch (IntNo) {
4663   case Intrinsic::riscv_vslide1up:
4664   case Intrinsic::riscv_vslide1down:
4665   case Intrinsic::riscv_vslide1up_mask:
4666   case Intrinsic::riscv_vslide1down_mask: {
4667     // We need to special case these when the scalar is larger than XLen.
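    // For example, on RV32 a vslide1up with an i64 scalar is emitted as two
    // SEW=32 vslide1up steps (hi first, then lo) on the bitcast i32 vector,
    // with the VL doubled accordingly.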
4668     unsigned NumOps = Op.getNumOperands();
4669     bool IsMasked = NumOps == 7;
4670 
4671     // Convert the vector source to the equivalent nxvXi32 vector.
4672     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4673     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4674 
4675     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4676                                    DAG.getConstant(0, DL, XLenVT));
4677     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4678                                    DAG.getConstant(1, DL, XLenVT));
4679 
4680     // Double the VL since we halved SEW.
4681     SDValue AVL = getVLOperand(Op);
4682     SDValue I32VL;
4683 
4684     // Optimize for constant AVL
4685     if (isa<ConstantSDNode>(AVL)) {
4686       unsigned EltSize = VT.getScalarSizeInBits();
4687       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4688 
4689       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4690       unsigned MaxVLMAX =
4691           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4692 
4693       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4694       unsigned MinVLMAX =
4695           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4696 
4697       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4698       if (AVLInt <= MinVLMAX) {
4699         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4700       } else if (AVLInt >= 2 * MaxVLMAX) {
4701         // Just set vl to VLMAX in this situation
4702         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4703         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4704         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4705         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4706         SDValue SETVLMAX = DAG.getTargetConstant(
4707             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4708         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4709                             LMUL);
4710       } else {
        // For AVL in (MinVLMAX, 2 * MaxVLMAX), the actual working VL depends
        // on the hardware implementation, so fall through and let the
        // vsetvli-based code below handle it.
4714       }
4715     }
4716     if (!I32VL) {
4717       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4718       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4719       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4720       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4721       SDValue SETVL =
4722           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
      // Use a vsetvli instruction to query the VL the hardware actually
      // selects, since for these AVLs it is implementation-defined.
4725       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4726                                SEW, LMUL);
4727       I32VL =
4728           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4729     }
4730 
4731     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4732 
4733     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4734     // instructions.
4735     SDValue Passthru;
4736     if (IsMasked)
4737       Passthru = DAG.getUNDEF(I32VT);
4738     else
4739       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4740 
4741     if (IntNo == Intrinsic::riscv_vslide1up ||
4742         IntNo == Intrinsic::riscv_vslide1up_mask) {
4743       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4744                         ScalarHi, I32Mask, I32VL);
4745       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4746                         ScalarLo, I32Mask, I32VL);
4747     } else {
4748       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4749                         ScalarLo, I32Mask, I32VL);
4750       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4751                         ScalarHi, I32Mask, I32VL);
4752     }
4753 
4754     // Convert back to nxvXi64.
4755     Vec = DAG.getBitcast(VT, Vec);
4756 
4757     if (!IsMasked)
4758       return Vec;
4759     // Apply mask after the operation.
4760     SDValue Mask = Operands[NumOps - 3];
4761     SDValue MaskedOff = Operands[1];
4762     // Assume Policy operand is the last operand.
4763     uint64_t Policy =
4764         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4765     // We don't need to select maskedoff if it's undef.
4766     if (MaskedOff.isUndef())
4767       return Vec;
4768     // TAMU
4769     if (Policy == RISCVII::TAIL_AGNOSTIC)
4770       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4771                          AVL);
    // TUMA or TUMU: Currently we always emit a tumu policy regardless of
    // tuma. This is fine because vmerge does not care about the mask policy.
4774     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4775                        AVL);
4776   }
4777   }
4778 
4779   // We need to convert the scalar to a splat vector.
4780   SDValue VL = getVLOperand(Op);
4781   assert(VL.getValueType() == XLenVT);
4782   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4783   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4784 }
4785 
4786 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4787                                                      SelectionDAG &DAG) const {
4788   unsigned IntNo = Op.getConstantOperandVal(0);
4789   SDLoc DL(Op);
4790   MVT XLenVT = Subtarget.getXLenVT();
4791 
4792   switch (IntNo) {
4793   default:
4794     break; // Don't custom lower most intrinsics.
4795   case Intrinsic::thread_pointer: {
4796     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4797     return DAG.getRegister(RISCV::X4, PtrVT);
4798   }
4799   case Intrinsic::riscv_orc_b:
4800   case Intrinsic::riscv_brev8: {
4801     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4802     unsigned Opc =
4803         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4804     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4805                        DAG.getConstant(7, DL, XLenVT));
4806   }
4807   case Intrinsic::riscv_grev:
4808   case Intrinsic::riscv_gorc: {
4809     unsigned Opc =
4810         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4811     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4812   }
4813   case Intrinsic::riscv_zip:
4814   case Intrinsic::riscv_unzip: {
4815     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4816     // For i32 the immediate is 15. For i64 the immediate is 31.
4817     unsigned Opc =
4818         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4819     unsigned BitWidth = Op.getValueSizeInBits();
4820     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4821     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4822                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4823   }
4824   case Intrinsic::riscv_shfl:
4825   case Intrinsic::riscv_unshfl: {
4826     unsigned Opc =
4827         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4828     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4829   }
4830   case Intrinsic::riscv_bcompress:
4831   case Intrinsic::riscv_bdecompress: {
4832     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4833                                                        : RISCVISD::BDECOMPRESS;
4834     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4835   }
4836   case Intrinsic::riscv_bfp:
4837     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4838                        Op.getOperand(2));
4839   case Intrinsic::riscv_fsl:
4840     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4841                        Op.getOperand(2), Op.getOperand(3));
4842   case Intrinsic::riscv_fsr:
4843     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4844                        Op.getOperand(2), Op.getOperand(3));
4845   case Intrinsic::riscv_vmv_x_s:
4846     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4847     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4848                        Op.getOperand(1));
4849   case Intrinsic::riscv_vmv_v_x:
4850     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4851                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4852                             Subtarget);
4853   case Intrinsic::riscv_vfmv_v_f:
4854     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4855                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4856   case Intrinsic::riscv_vmv_s_x: {
4857     SDValue Scalar = Op.getOperand(2);
4858 
4859     if (Scalar.getValueType().bitsLE(XLenVT)) {
4860       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4861       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4862                          Op.getOperand(1), Scalar, Op.getOperand(3));
4863     }
4864 
4865     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4866 
4867     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat containing
    // the two values, which we assemble using some bit math. Next we'll use
4870     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4871     // to merge element 0 from our splat into the source vector.
4872     // FIXME: This is probably not the best way to do this, but it is
4873     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4874     // point.
4875     //   sw lo, (a0)
4876     //   sw hi, 4(a0)
4877     //   vlse vX, (a0)
4878     //
4879     //   vid.v      vVid
4880     //   vmseq.vx   mMask, vVid, 0
4881     //   vmerge.vvm vDest, vSrc, vVal, mMask
4882     MVT VT = Op.getSimpleValueType();
4883     SDValue Vec = Op.getOperand(1);
4884     SDValue VL = getVLOperand(Op);
4885 
    SDValue SplattedVal =
        splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4887     if (Op.getOperand(1).isUndef())
4888       return SplattedVal;
4889     SDValue SplattedIdx =
4890         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4891                     DAG.getConstant(0, DL, MVT::i32), VL);
4892 
4893     MVT MaskVT = getMaskTypeFor(VT);
4894     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4895     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4896     SDValue SelectCond =
4897         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4898                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4899     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4900                        Vec, VL);
4901   }
4902   }
4903 
4904   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4905 }
4906 
4907 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4908                                                     SelectionDAG &DAG) const {
4909   unsigned IntNo = Op.getConstantOperandVal(1);
4910   switch (IntNo) {
4911   default:
4912     break;
4913   case Intrinsic::riscv_masked_strided_load: {
4914     SDLoc DL(Op);
4915     MVT XLenVT = Subtarget.getXLenVT();
4916 
4917     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4918     // the selection of the masked intrinsics doesn't do this for us.
4919     SDValue Mask = Op.getOperand(5);
4920     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4921 
4922     MVT VT = Op->getSimpleValueType(0);
4923     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4924 
4925     SDValue PassThru = Op.getOperand(2);
4926     if (!IsUnmasked) {
4927       MVT MaskVT = getMaskTypeFor(ContainerVT);
4928       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4929       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4930     }
4931 
4932     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4933 
4934     SDValue IntID = DAG.getTargetConstant(
4935         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4936         XLenVT);
4937 
4938     auto *Load = cast<MemIntrinsicSDNode>(Op);
4939     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4940     if (IsUnmasked)
4941       Ops.push_back(DAG.getUNDEF(ContainerVT));
4942     else
4943       Ops.push_back(PassThru);
4944     Ops.push_back(Op.getOperand(3)); // Ptr
4945     Ops.push_back(Op.getOperand(4)); // Stride
4946     if (!IsUnmasked)
4947       Ops.push_back(Mask);
4948     Ops.push_back(VL);
4949     if (!IsUnmasked) {
      SDValue Policy =
          DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4951       Ops.push_back(Policy);
4952     }
4953 
4954     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4955     SDValue Result =
4956         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4957                                 Load->getMemoryVT(), Load->getMemOperand());
4958     SDValue Chain = Result.getValue(1);
4959     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4960     return DAG.getMergeValues({Result, Chain}, DL);
4961   }
4962   case Intrinsic::riscv_seg2_load:
4963   case Intrinsic::riscv_seg3_load:
4964   case Intrinsic::riscv_seg4_load:
4965   case Intrinsic::riscv_seg5_load:
4966   case Intrinsic::riscv_seg6_load:
4967   case Intrinsic::riscv_seg7_load:
4968   case Intrinsic::riscv_seg8_load: {
4969     SDLoc DL(Op);
4970     static const Intrinsic::ID VlsegInts[7] = {
4971         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4972         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4973         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4974         Intrinsic::riscv_vlseg8};
4975     unsigned NF = Op->getNumValues() - 1;
4976     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4977     MVT XLenVT = Subtarget.getXLenVT();
4978     MVT VT = Op->getSimpleValueType(0);
4979     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4980 
4981     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4982     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4983     auto *Load = cast<MemIntrinsicSDNode>(Op);
4984     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4985     ContainerVTs.push_back(MVT::Other);
4986     SDVTList VTs = DAG.getVTList(ContainerVTs);
4987     SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID};
4988     Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT));
4989     Ops.push_back(Op.getOperand(2));
4990     Ops.push_back(VL);
4991     SDValue Result =
4992         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4993                                 Load->getMemoryVT(), Load->getMemOperand());
4994     SmallVector<SDValue, 9> Results;
4995     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4996       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4997                                                   DAG, Subtarget));
4998     Results.push_back(Result.getValue(NF));
4999     return DAG.getMergeValues(Results, DL);
5000   }
5001   }
5002 
5003   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
5004 }
5005 
5006 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
5007                                                  SelectionDAG &DAG) const {
5008   unsigned IntNo = Op.getConstantOperandVal(1);
5009   switch (IntNo) {
5010   default:
5011     break;
5012   case Intrinsic::riscv_masked_strided_store: {
5013     SDLoc DL(Op);
5014     MVT XLenVT = Subtarget.getXLenVT();
5015 
5016     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5017     // the selection of the masked intrinsics doesn't do this for us.
5018     SDValue Mask = Op.getOperand(5);
5019     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5020 
5021     SDValue Val = Op.getOperand(2);
5022     MVT VT = Val.getSimpleValueType();
5023     MVT ContainerVT = getContainerForFixedLengthVector(VT);
5024 
5025     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5026     if (!IsUnmasked) {
5027       MVT MaskVT = getMaskTypeFor(ContainerVT);
5028       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5029     }
5030 
5031     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5032 
5033     SDValue IntID = DAG.getTargetConstant(
5034         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
5035         XLenVT);
5036 
5037     auto *Store = cast<MemIntrinsicSDNode>(Op);
5038     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
5039     Ops.push_back(Val);
5040     Ops.push_back(Op.getOperand(3)); // Ptr
5041     Ops.push_back(Op.getOperand(4)); // Stride
5042     if (!IsUnmasked)
5043       Ops.push_back(Mask);
5044     Ops.push_back(VL);
5045 
5046     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
5047                                    Ops, Store->getMemoryVT(),
5048                                    Store->getMemOperand());
5049   }
5050   }
5051 
5052   return SDValue();
5053 }
5054 
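// Return the LMUL=1 scalable vector type matching VT's element type. For
// example, with RVVBitsPerBlock=64 an i32 element type always yields nxv2i32,
// whatever LMUL VT itself has.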
5055 static MVT getLMUL1VT(MVT VT) {
5056   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
5057          "Unexpected vector MVT");
5058   return MVT::getScalableVectorVT(
5059       VT.getVectorElementType(),
5060       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
5061 }
5062 
5063 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
5064   switch (ISDOpcode) {
5065   default:
5066     llvm_unreachable("Unhandled reduction");
5067   case ISD::VECREDUCE_ADD:
5068     return RISCVISD::VECREDUCE_ADD_VL;
5069   case ISD::VECREDUCE_UMAX:
5070     return RISCVISD::VECREDUCE_UMAX_VL;
5071   case ISD::VECREDUCE_SMAX:
5072     return RISCVISD::VECREDUCE_SMAX_VL;
5073   case ISD::VECREDUCE_UMIN:
5074     return RISCVISD::VECREDUCE_UMIN_VL;
5075   case ISD::VECREDUCE_SMIN:
5076     return RISCVISD::VECREDUCE_SMIN_VL;
5077   case ISD::VECREDUCE_AND:
5078     return RISCVISD::VECREDUCE_AND_VL;
5079   case ISD::VECREDUCE_OR:
5080     return RISCVISD::VECREDUCE_OR_VL;
5081   case ISD::VECREDUCE_XOR:
5082     return RISCVISD::VECREDUCE_XOR_VL;
5083   }
5084 }
5085 
5086 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5087                                                          SelectionDAG &DAG,
5088                                                          bool IsVP) const {
5089   SDLoc DL(Op);
5090   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5091   MVT VecVT = Vec.getSimpleValueType();
5092   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5093           Op.getOpcode() == ISD::VECREDUCE_OR ||
5094           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5095           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5096           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5097           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5098          "Unexpected reduction lowering");
5099 
5100   MVT XLenVT = Subtarget.getXLenVT();
5101   assert(Op.getValueType() == XLenVT &&
5102          "Expected reduction output to be legalized to XLenVT");
5103 
5104   MVT ContainerVT = VecVT;
5105   if (VecVT.isFixedLengthVector()) {
5106     ContainerVT = getContainerForFixedLengthVector(VecVT);
5107     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5108   }
5109 
5110   SDValue Mask, VL;
5111   if (IsVP) {
5112     Mask = Op.getOperand(2);
5113     VL = Op.getOperand(3);
5114   } else {
5115     std::tie(Mask, VL) =
5116         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5117   }
5118 
5119   unsigned BaseOpc;
5120   ISD::CondCode CC;
5121   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5122 
5123   switch (Op.getOpcode()) {
5124   default:
5125     llvm_unreachable("Unhandled reduction");
5126   case ISD::VECREDUCE_AND:
5127   case ISD::VP_REDUCE_AND: {
5128     // vcpop ~x == 0
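    // All elements are set iff the complement has no set bits, e.g. for
    // x = 1111 we get vcpop(~x) = vcpop(0000) = 0.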
5129     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5130     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5131     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5132     CC = ISD::SETEQ;
5133     BaseOpc = ISD::AND;
5134     break;
5135   }
5136   case ISD::VECREDUCE_OR:
5137   case ISD::VP_REDUCE_OR:
5138     // vcpop x != 0
5139     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5140     CC = ISD::SETNE;
5141     BaseOpc = ISD::OR;
5142     break;
5143   case ISD::VECREDUCE_XOR:
5144   case ISD::VP_REDUCE_XOR: {
5145     // ((vcpop x) & 1) != 0
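    // XOR over i1 elements is the parity of the set-bit count, hence the
    // low bit of the popcount gives the reduction result.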
5146     SDValue One = DAG.getConstant(1, DL, XLenVT);
5147     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5148     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5149     CC = ISD::SETNE;
5150     BaseOpc = ISD::XOR;
5151     break;
5152   }
5153   }
5154 
5155   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5156 
5157   if (!IsVP)
5158     return SetCC;
5159 
5160   // Now include the start value in the operation.
5161   // Note that we must return the start value when no elements are operated
  // upon. The vcpop instructions we've emitted in each case above will return
  // 0 when no elements are active, and so we've already received the neutral
  // value:
5164   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5165   // can simply include the start value.
5166   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5167 }
5168 
5169 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5170                                             SelectionDAG &DAG) const {
5171   SDLoc DL(Op);
5172   SDValue Vec = Op.getOperand(0);
5173   EVT VecEVT = Vec.getValueType();
5174 
5175   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5176 
5177   // Due to ordering in legalize types we may have a vector type that needs to
5178   // be split. Do that manually so we can get down to a legal type.
5179   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5180          TargetLowering::TypeSplitVector) {
5181     SDValue Lo, Hi;
5182     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5183     VecEVT = Lo.getValueType();
5184     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5185   }
5186 
5187   // TODO: The type may need to be widened rather than split. Or widened before
5188   // it can be split.
5189   if (!isTypeLegal(VecEVT))
5190     return SDValue();
5191 
5192   MVT VecVT = VecEVT.getSimpleVT();
5193   MVT VecEltVT = VecVT.getVectorElementType();
5194   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5195 
5196   MVT ContainerVT = VecVT;
5197   if (VecVT.isFixedLengthVector()) {
5198     ContainerVT = getContainerForFixedLengthVector(VecVT);
5199     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5200   }
5201 
5202   MVT M1VT = getLMUL1VT(ContainerVT);
5203   MVT XLenVT = Subtarget.getXLenVT();
5204 
5205   SDValue Mask, VL;
5206   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5207 
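  // RVV reduction instructions take the start value in element 0 of a vector
  // operand and produce their result in element 0 of the destination, so
  // splat the neutral element into an LMUL=1 register and extract element 0
  // of the result afterwards.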
5208   SDValue NeutralElem =
5209       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5210   SDValue IdentitySplat =
5211       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5212                        M1VT, DL, DAG, Subtarget);
5213   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5214                                   IdentitySplat, Mask, VL);
5215   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5216                              DAG.getConstant(0, DL, XLenVT));
5217   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5218 }
5219 
5220 // Given a reduction op, this function returns the matching reduction opcode,
5221 // the vector SDValue and the scalar SDValue required to lower this to a
5222 // RISCVISD node.
5223 static std::tuple<unsigned, SDValue, SDValue>
5224 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5225   SDLoc DL(Op);
5226   auto Flags = Op->getFlags();
5227   unsigned Opcode = Op.getOpcode();
5228   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5229   switch (Opcode) {
5230   default:
5231     llvm_unreachable("Unhandled reduction");
5232   case ISD::VECREDUCE_FADD: {
5233     // Use positive zero if we can. It is cheaper to materialize.
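    // (-0.0 is the strict FADD identity, since +0.0 + -0.0 == +0.0; the
    // no-signed-zeros flag lets us substitute +0.0.)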
5234     SDValue Zero =
5235         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5236     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5237   }
5238   case ISD::VECREDUCE_SEQ_FADD:
5239     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5240                            Op.getOperand(0));
5241   case ISD::VECREDUCE_FMIN:
5242     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5243                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5244   case ISD::VECREDUCE_FMAX:
5245     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5246                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5247   }
5248 }
5249 
5250 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5251                                               SelectionDAG &DAG) const {
5252   SDLoc DL(Op);
5253   MVT VecEltVT = Op.getSimpleValueType();
5254 
5255   unsigned RVVOpcode;
5256   SDValue VectorVal, ScalarVal;
5257   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5258       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5259   MVT VecVT = VectorVal.getSimpleValueType();
5260 
5261   MVT ContainerVT = VecVT;
5262   if (VecVT.isFixedLengthVector()) {
5263     ContainerVT = getContainerForFixedLengthVector(VecVT);
5264     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5265   }
5266 
5267   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5268   MVT XLenVT = Subtarget.getXLenVT();
5269 
5270   SDValue Mask, VL;
5271   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5272 
5273   SDValue ScalarSplat =
5274       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5275                        M1VT, DL, DAG, Subtarget);
5276   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5277                                   VectorVal, ScalarSplat, Mask, VL);
5278   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5279                      DAG.getConstant(0, DL, XLenVT));
5280 }
5281 
5282 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5283   switch (ISDOpcode) {
5284   default:
5285     llvm_unreachable("Unhandled reduction");
5286   case ISD::VP_REDUCE_ADD:
5287     return RISCVISD::VECREDUCE_ADD_VL;
5288   case ISD::VP_REDUCE_UMAX:
5289     return RISCVISD::VECREDUCE_UMAX_VL;
5290   case ISD::VP_REDUCE_SMAX:
5291     return RISCVISD::VECREDUCE_SMAX_VL;
5292   case ISD::VP_REDUCE_UMIN:
5293     return RISCVISD::VECREDUCE_UMIN_VL;
5294   case ISD::VP_REDUCE_SMIN:
5295     return RISCVISD::VECREDUCE_SMIN_VL;
5296   case ISD::VP_REDUCE_AND:
5297     return RISCVISD::VECREDUCE_AND_VL;
5298   case ISD::VP_REDUCE_OR:
5299     return RISCVISD::VECREDUCE_OR_VL;
5300   case ISD::VP_REDUCE_XOR:
5301     return RISCVISD::VECREDUCE_XOR_VL;
5302   case ISD::VP_REDUCE_FADD:
5303     return RISCVISD::VECREDUCE_FADD_VL;
5304   case ISD::VP_REDUCE_SEQ_FADD:
5305     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5306   case ISD::VP_REDUCE_FMAX:
5307     return RISCVISD::VECREDUCE_FMAX_VL;
5308   case ISD::VP_REDUCE_FMIN:
5309     return RISCVISD::VECREDUCE_FMIN_VL;
5310   }
5311 }
5312 
5313 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5314                                            SelectionDAG &DAG) const {
5315   SDLoc DL(Op);
5316   SDValue Vec = Op.getOperand(1);
5317   EVT VecEVT = Vec.getValueType();
5318 
5319   // TODO: The type may need to be widened rather than split. Or widened before
5320   // it can be split.
5321   if (!isTypeLegal(VecEVT))
5322     return SDValue();
5323 
5324   MVT VecVT = VecEVT.getSimpleVT();
5325   MVT VecEltVT = VecVT.getVectorElementType();
5326   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5327 
5328   MVT ContainerVT = VecVT;
5329   if (VecVT.isFixedLengthVector()) {
5330     ContainerVT = getContainerForFixedLengthVector(VecVT);
5331     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5332   }
5333 
5334   SDValue VL = Op.getOperand(3);
5335   SDValue Mask = Op.getOperand(2);
5336 
5337   MVT M1VT = getLMUL1VT(ContainerVT);
5338   MVT XLenVT = Subtarget.getXLenVT();
5339   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5340 
5341   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5342                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5343                                         DL, DAG, Subtarget);
5344   SDValue Reduction =
5345       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5346   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5347                              DAG.getConstant(0, DL, XLenVT));
5348   if (!VecVT.isInteger())
5349     return Elt0;
5350   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5351 }
5352 
5353 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5354                                                    SelectionDAG &DAG) const {
5355   SDValue Vec = Op.getOperand(0);
5356   SDValue SubVec = Op.getOperand(1);
5357   MVT VecVT = Vec.getSimpleValueType();
5358   MVT SubVecVT = SubVec.getSimpleValueType();
5359 
5360   SDLoc DL(Op);
5361   MVT XLenVT = Subtarget.getXLenVT();
5362   unsigned OrigIdx = Op.getConstantOperandVal(2);
5363   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5364 
5365   // We don't have the ability to slide mask vectors up indexed by their i1
5366   // elements; the smallest we can do is i8. Often we are able to bitcast to
5367   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5368   // into a scalable one, we might not necessarily have enough scalable
5369   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5370   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5371       (OrigIdx != 0 || !Vec.isUndef())) {
5372     if (VecVT.getVectorMinNumElements() >= 8 &&
5373         SubVecVT.getVectorMinNumElements() >= 8) {
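      // e.g. inserting v32i1 into nxv64i1 at index 32 becomes inserting v4i8
      // into nxv8i8 at index 4.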
5374       assert(OrigIdx % 8 == 0 && "Invalid index");
5375       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5376              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5377              "Unexpected mask vector lowering");
5378       OrigIdx /= 8;
5379       SubVecVT =
5380           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5381                            SubVecVT.isScalableVector());
5382       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5383                                VecVT.isScalableVector());
5384       Vec = DAG.getBitcast(VecVT, Vec);
5385       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5386     } else {
5387       // We can't slide this mask vector up indexed by its i1 elements.
5388       // This poses a problem when we wish to insert a scalable vector which
5389       // can't be re-expressed as a larger type. Just choose the slow path and
5390       // extend to a larger type, then truncate back down.
5391       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5392       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5393       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5394       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5395       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5396                         Op.getOperand(2));
5397       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5398       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5399     }
5400   }
5401 
  // If the subvector is a fixed-length type, we cannot use subregister
5403   // manipulation to simplify the codegen; we don't know which register of a
5404   // LMUL group contains the specific subvector as we only know the minimum
5405   // register size. Therefore we must slide the vector group up the full
5406   // amount.
5407   if (SubVecVT.isFixedLengthVector()) {
5408     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5409       return Op;
5410     MVT ContainerVT = VecVT;
5411     if (VecVT.isFixedLengthVector()) {
5412       ContainerVT = getContainerForFixedLengthVector(VecVT);
5413       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5414     }
5415     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5416                          DAG.getUNDEF(ContainerVT), SubVec,
5417                          DAG.getConstant(0, DL, XLenVT));
5418     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5419       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5420       return DAG.getBitcast(Op.getValueType(), SubVec);
5421     }
5422     SDValue Mask =
5423         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5424     // Set the vector length to only the number of elements we care about. Note
5425     // that for slideup this includes the offset.
5426     SDValue VL =
5427         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5428     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5429     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5430                                   SubVec, SlideupAmt, Mask, VL);
5431     if (VecVT.isFixedLengthVector())
5432       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5433     return DAG.getBitcast(Op.getValueType(), Slideup);
5434   }
5435 
5436   unsigned SubRegIdx, RemIdx;
5437   std::tie(SubRegIdx, RemIdx) =
5438       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5439           VecVT, SubVecVT, OrigIdx, TRI);
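  // SubRegIdx identifies the vector subregister holding the insertion point;
  // RemIdx is the element offset that remains within that subregister.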
5440 
5441   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5442   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5443                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5444                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5445 
5446   // 1. If the Idx has been completely eliminated and this subvector's size is
5447   // a vector register or a multiple thereof, or the surrounding elements are
5448   // undef, then this is a subvector insert which naturally aligns to a vector
5449   // register. These can easily be handled using subregister manipulation.
5450   // 2. If the subvector is smaller than a vector register, then the insertion
5451   // must preserve the undisturbed elements of the register. We do this by
5452   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5453   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5454   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5455   // LMUL=1 type back into the larger vector (resolving to another subregister
5456   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5457   // to avoid allocating a large register group to hold our subvector.
5458   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5459     return Op;
5460 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
5463   // (in our case undisturbed). This means we can set up a subvector insertion
5464   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5465   // size of the subvector.
5466   MVT InterSubVT = VecVT;
5467   SDValue AlignedExtract = Vec;
5468   unsigned AlignedIdx = OrigIdx - RemIdx;
5469   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5470     InterSubVT = getLMUL1VT(VecVT);
5471     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
5473     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5474                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5475   }
5476 
5477   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5478   // For scalable vectors this must be further multiplied by vscale.
5479   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5480 
5481   SDValue Mask, VL;
5482   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5483 
5484   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5485   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5486   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5487   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5488 
5489   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5490                        DAG.getUNDEF(InterSubVT), SubVec,
5491                        DAG.getConstant(0, DL, XLenVT));
5492 
5493   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5494                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5495 
5496   // If required, insert this subvector back into the correct vector register.
5497   // This should resolve to an INSERT_SUBREG instruction.
5498   if (VecVT.bitsGT(InterSubVT))
5499     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5500                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5501 
5502   // We might have bitcast from a mask type: cast back to the original type if
5503   // required.
5504   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5505 }
5506 
5507 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5508                                                     SelectionDAG &DAG) const {
5509   SDValue Vec = Op.getOperand(0);
5510   MVT SubVecVT = Op.getSimpleValueType();
5511   MVT VecVT = Vec.getSimpleValueType();
5512 
5513   SDLoc DL(Op);
5514   MVT XLenVT = Subtarget.getXLenVT();
5515   unsigned OrigIdx = Op.getConstantOperandVal(1);
5516   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5517 
5518   // We don't have the ability to slide mask vectors down indexed by their i1
5519   // elements; the smallest we can do is i8. Often we are able to bitcast to
5520   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5521   // from a scalable one, we might not necessarily have enough scalable
5522   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5523   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5524     if (VecVT.getVectorMinNumElements() >= 8 &&
5525         SubVecVT.getVectorMinNumElements() >= 8) {
5526       assert(OrigIdx % 8 == 0 && "Invalid index");
5527       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5528              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5529              "Unexpected mask vector lowering");
5530       OrigIdx /= 8;
5531       SubVecVT =
5532           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5533                            SubVecVT.isScalableVector());
5534       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5535                                VecVT.isScalableVector());
5536       Vec = DAG.getBitcast(VecVT, Vec);
5537     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
      // This poses a problem when we wish to extract a scalable vector which
      // can't be re-expressed as a larger type. Just choose the slow path and
      // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting a fixed-length
      // vector from a fixed-length vector, where we can extract as i8 and
      // shift the correct element right to reach the desired subvector.
5545       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5546       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5547       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5548       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5549                         Op.getOperand(1));
5550       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5551       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5552     }
5553   }
5554 
  // If the subvector is a fixed-length type, we cannot use subregister
5556   // manipulation to simplify the codegen; we don't know which register of a
5557   // LMUL group contains the specific subvector as we only know the minimum
5558   // register size. Therefore we must slide the vector group down the full
5559   // amount.
5560   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
5563     if (OrigIdx == 0)
5564       return Op;
5565     MVT ContainerVT = VecVT;
5566     if (VecVT.isFixedLengthVector()) {
5567       ContainerVT = getContainerForFixedLengthVector(VecVT);
5568       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5569     }
5570     SDValue Mask =
5571         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5572     // Set the vector length to only the number of elements we care about. This
5573     // avoids sliding down elements we're going to discard straight away.
5574     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5575     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5576     SDValue Slidedown =
5577         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5578                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5579     // Now we can use a cast-like subvector extract to get the result.
5580     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5581                             DAG.getConstant(0, DL, XLenVT));
5582     return DAG.getBitcast(Op.getValueType(), Slidedown);
5583   }
5584 
5585   unsigned SubRegIdx, RemIdx;
5586   std::tie(SubRegIdx, RemIdx) =
5587       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5588           VecVT, SubVecVT, OrigIdx, TRI);
5589 
5590   // If the Idx has been completely eliminated then this is a subvector extract
5591   // which naturally aligns to a vector register. These can easily be handled
5592   // using subregister manipulation.
5593   if (RemIdx == 0)
5594     return Op;
5595 
5596   // Else we must shift our vector register directly to extract the subvector.
5597   // Do this using VSLIDEDOWN.
5598 
5599   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an
  // EXTRACT_SUBREG instruction.
5602   MVT InterSubVT = VecVT;
5603   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5604     InterSubVT = getLMUL1VT(VecVT);
5605     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5606                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5607   }
5608 
5609   // Slide this vector register down by the desired number of elements in order
5610   // to place the desired subvector starting at element 0.
5611   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5612   // For scalable vectors this must be further multiplied by vscale.
5613   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5614 
5615   SDValue Mask, VL;
5616   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5617   SDValue Slidedown =
5618       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5619                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5620 
5621   // Now the vector is in the right position, extract our final subvector. This
5622   // should resolve to a COPY.
5623   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5624                           DAG.getConstant(0, DL, XLenVT));
5625 
5626   // We might have bitcast from a mask type: cast back to the original type if
5627   // required.
5628   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5629 }
5630 
// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
5633 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5634                                               SelectionDAG &DAG) const {
5635   SDLoc DL(Op);
5636   MVT VT = Op.getSimpleValueType();
5637   MVT XLenVT = Subtarget.getXLenVT();
5638   SDValue Mask, VL;
5639   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5640   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5641   uint64_t StepValImm = Op.getConstantOperandVal(0);
5642   if (StepValImm != 1) {
5643     if (isPowerOf2_64(StepValImm)) {
5644       SDValue StepVal =
5645           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5646                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5647       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5648     } else {
5649       SDValue StepVal = lowerScalarSplat(
5650           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5651           VL, VT, DL, DAG, Subtarget);
5652       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5653     }
5654   }
5655   return StepVec;
5656 }
5657 
5658 // Implement vector_reverse using vrgather.vv with indices determined by
5659 // subtracting the id of each element from (VLMAX-1). This will convert
5660 // the indices like so:
5661 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5662 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5663 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5664                                                  SelectionDAG &DAG) const {
5665   SDLoc DL(Op);
5666   MVT VecVT = Op.getSimpleValueType();
5667   if (VecVT.getVectorElementType() == MVT::i1) {
5668     MVT WidenVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
5669     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, Op.getOperand(0));
5670     SDValue Op2 = DAG.getNode(ISD::VECTOR_REVERSE, DL, WidenVT, Op1);
5671     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
5672   }
5673   unsigned EltSize = VecVT.getScalarSizeInBits();
5674   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5675   unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
5676   unsigned MaxVLMAX =
5677     RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5678 
5679   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5680   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5681 
5682   // If this is SEW=8 and VLMAX is potentially more than 256, we need
5683   // to use vrgatherei16.vv.
5684   // TODO: It's also possible to use vrgatherei16.vv for other types to
5685   // decrease register width for the index calculation.
5686   if (MaxVLMAX > 256 && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer requires
    // vrgatherei16.vv.
5691     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5692       SDValue Lo, Hi;
5693       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5694       EVT LoVT, HiVT;
5695       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5696       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5697       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5698       // Reassemble the low and high pieces reversed.
5699       // FIXME: This is a CONCAT_VECTORS.
5700       SDValue Res =
5701           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5702                       DAG.getIntPtrConstant(0, DL));
5703       return DAG.getNode(
5704           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5705           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5706     }
5707 
5708     // Just promote the int type to i16 which will double the LMUL.
5709     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5710     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5711   }
5712 
5713   MVT XLenVT = Subtarget.getXLenVT();
5714   SDValue Mask, VL;
5715   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5716 
5717   // Calculate VLMAX-1 for the desired SEW.
5718   unsigned MinElts = VecVT.getVectorMinNumElements();
5719   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5720                               DAG.getConstant(MinElts, DL, XLenVT));
5721   SDValue VLMinus1 =
5722       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5723 
5724   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5725   bool IsRV32E64 =
5726       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5727   SDValue SplatVL;
5728   if (!IsRV32E64)
5729     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5730   else
5731     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5732                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5733 
5734   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5735   SDValue Indices =
5736       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5737 
5738   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask,
5739                      DAG.getUNDEF(VecVT), VL);
5740 }
5741 
5742 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5743                                                 SelectionDAG &DAG) const {
5744   SDLoc DL(Op);
5745   SDValue V1 = Op.getOperand(0);
5746   SDValue V2 = Op.getOperand(1);
5747   MVT XLenVT = Subtarget.getXLenVT();
5748   MVT VecVT = Op.getSimpleValueType();
5749 
5750   unsigned MinElts = VecVT.getVectorMinNumElements();
5751   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5752                               DAG.getConstant(MinElts, DL, XLenVT));
5753 
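  // vector_splice(V1, V2, Imm) selects a VLMAX-element window from the
  // concatenation of V1 and V2; a negative Imm counts back from the end of
  // V1. Lower it as a vslidedown of V1 by the offset, followed by a vslideup
  // that fills the vacated tail with the leading elements of V2.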
5754   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5755   SDValue DownOffset, UpOffset;
5756   if (ImmValue >= 0) {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant.
5759     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5760     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5761   } else {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant rather than negating the original operand.
5764     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5765     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5766   }
5767 
5768   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5769 
5770   SDValue SlideDown =
5771       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5772                   DownOffset, TrueMask, UpOffset);
5773   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5774                      TrueMask, DAG.getRegister(RISCV::X0, XLenVT));
5775 }
5776 
5777 SDValue
5778 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5779                                                      SelectionDAG &DAG) const {
5780   SDLoc DL(Op);
5781   auto *Load = cast<LoadSDNode>(Op);
5782 
5783   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5784                                         Load->getMemoryVT(),
5785                                         *Load->getMemOperand()) &&
5786          "Expecting a correctly-aligned load");
5787 
5788   MVT VT = Op.getSimpleValueType();
5789   MVT XLenVT = Subtarget.getXLenVT();
5790   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5791 
5792   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5793 
5794   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5795   SDValue IntID = DAG.getTargetConstant(
5796       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5797   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
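  // vlm.v has no passthru operand, so only the vle path takes an undef merge
  // value.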
5798   if (!IsMaskOp)
5799     Ops.push_back(DAG.getUNDEF(ContainerVT));
5800   Ops.push_back(Load->getBasePtr());
5801   Ops.push_back(VL);
5802   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5803   SDValue NewLoad =
5804       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5805                               Load->getMemoryVT(), Load->getMemOperand());
5806 
5807   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5808   return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
5809 }
5810 
5811 SDValue
5812 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5813                                                       SelectionDAG &DAG) const {
5814   SDLoc DL(Op);
5815   auto *Store = cast<StoreSDNode>(Op);
5816 
5817   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5818                                         Store->getMemoryVT(),
5819                                         *Store->getMemOperand()) &&
5820          "Expecting a correctly-aligned store");
5821 
5822   SDValue StoreVal = Store->getValue();
5823   MVT VT = StoreVal.getSimpleValueType();
5824   MVT XLenVT = Subtarget.getXLenVT();
5825 
  // If the size is less than a byte, we need to pad with zeros to make a byte.
5827   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5828     VT = MVT::v8i1;
5829     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5830                            DAG.getConstant(0, DL, VT), StoreVal,
5831                            DAG.getIntPtrConstant(0, DL));
5832   }
5833 
5834   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5835 
5836   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5837 
5838   SDValue NewValue =
5839       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5840 
5841   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5842   SDValue IntID = DAG.getTargetConstant(
5843       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5844   return DAG.getMemIntrinsicNode(
5845       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5846       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5847       Store->getMemoryVT(), Store->getMemOperand());
5848 }
5849 
5850 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5851                                              SelectionDAG &DAG) const {
5852   SDLoc DL(Op);
5853   MVT VT = Op.getSimpleValueType();
5854 
5855   const auto *MemSD = cast<MemSDNode>(Op);
5856   EVT MemVT = MemSD->getMemoryVT();
5857   MachineMemOperand *MMO = MemSD->getMemOperand();
5858   SDValue Chain = MemSD->getChain();
5859   SDValue BasePtr = MemSD->getBasePtr();
5860 
5861   SDValue Mask, PassThru, VL;
5862   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5863     Mask = VPLoad->getMask();
5864     PassThru = DAG.getUNDEF(VT);
5865     VL = VPLoad->getVectorLength();
5866   } else {
5867     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5868     Mask = MLoad->getMask();
5869     PassThru = MLoad->getPassThru();
5870   }
5871 
5872   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5873 
5874   MVT XLenVT = Subtarget.getXLenVT();
5875 
5876   MVT ContainerVT = VT;
5877   if (VT.isFixedLengthVector()) {
5878     ContainerVT = getContainerForFixedLengthVector(VT);
5879     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5880     if (!IsUnmasked) {
5881       MVT MaskVT = getMaskTypeFor(ContainerVT);
5882       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5883     }
5884   }
5885 
5886   if (!VL)
5887     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5888 
5889   unsigned IntID =
5890       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5891   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5892   if (IsUnmasked)
5893     Ops.push_back(DAG.getUNDEF(ContainerVT));
5894   else
5895     Ops.push_back(PassThru);
5896   Ops.push_back(BasePtr);
5897   if (!IsUnmasked)
5898     Ops.push_back(Mask);
5899   Ops.push_back(VL);
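  // Only the masked intrinsic form carries a trailing policy operand; tail
  // agnostic is fine here as only the first VL elements are used.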
5900   if (!IsUnmasked)
5901     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5902 
5903   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5904 
5905   SDValue Result =
5906       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5907   Chain = Result.getValue(1);
5908 
5909   if (VT.isFixedLengthVector())
5910     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5911 
5912   return DAG.getMergeValues({Result, Chain}, DL);
5913 }
5914 
5915 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5916                                               SelectionDAG &DAG) const {
5917   SDLoc DL(Op);
5918 
5919   const auto *MemSD = cast<MemSDNode>(Op);
5920   EVT MemVT = MemSD->getMemoryVT();
5921   MachineMemOperand *MMO = MemSD->getMemOperand();
5922   SDValue Chain = MemSD->getChain();
5923   SDValue BasePtr = MemSD->getBasePtr();
5924   SDValue Val, Mask, VL;
5925 
5926   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5927     Val = VPStore->getValue();
5928     Mask = VPStore->getMask();
5929     VL = VPStore->getVectorLength();
5930   } else {
5931     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5932     Val = MStore->getValue();
5933     Mask = MStore->getMask();
5934   }
5935 
5936   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5937 
5938   MVT VT = Val.getSimpleValueType();
5939   MVT XLenVT = Subtarget.getXLenVT();
5940 
5941   MVT ContainerVT = VT;
5942   if (VT.isFixedLengthVector()) {
5943     ContainerVT = getContainerForFixedLengthVector(VT);
5944 
5945     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5946     if (!IsUnmasked) {
5947       MVT MaskVT = getMaskTypeFor(ContainerVT);
5948       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5949     }
5950   }
5951 
5952   if (!VL)
5953     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5954 
5955   unsigned IntID =
5956       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5957   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5958   Ops.push_back(Val);
5959   Ops.push_back(BasePtr);
5960   if (!IsUnmasked)
5961     Ops.push_back(Mask);
5962   Ops.push_back(VL);
5963 
5964   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5965                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5966 }
5967 
5968 SDValue
5969 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5970                                                       SelectionDAG &DAG) const {
5971   MVT InVT = Op.getOperand(0).getSimpleValueType();
5972   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5973 
5974   MVT VT = Op.getSimpleValueType();
5975 
5976   SDValue Op1 =
5977       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5978   SDValue Op2 =
5979       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5980 
5981   SDLoc DL(Op);
5982   SDValue VL =
5983       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5984 
5985   MVT MaskVT = getMaskTypeFor(ContainerVT);
5986   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5987 
5988   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5989                             Op.getOperand(2), Mask, VL);
5990 
5991   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5992 }
5993 
5994 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5995     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5996   MVT VT = Op.getSimpleValueType();
5997 
5998   if (VT.getVectorElementType() == MVT::i1)
5999     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
6000 
6001   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
6002 }
6003 
6004 SDValue
6005 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
6006                                                       SelectionDAG &DAG) const {
6007   unsigned Opc;
6008   switch (Op.getOpcode()) {
6009   default: llvm_unreachable("Unexpected opcode!");
6010   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
6011   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
6012   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
6013   }
6014 
6015   return lowerToScalableOp(Op, DAG, Opc);
6016 }
6017 
6018 // Lower vector ABS to smax(X, sub(0, X)).
6019 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
6020   SDLoc DL(Op);
6021   MVT VT = Op.getSimpleValueType();
6022   SDValue X = Op.getOperand(0);
6023 
6024   assert(VT.isFixedLengthVector() && "Unexpected type");
6025 
6026   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6027   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
6028 
6029   SDValue Mask, VL;
6030   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6031 
6032   SDValue SplatZero = DAG.getNode(
6033       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
6034       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
6035   SDValue NegX =
6036       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
6037   SDValue Max =
6038       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
6039 
6040   return convertFromScalableVector(VT, Max, DAG, Subtarget);
6041 }
6042 
6043 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
6044     SDValue Op, SelectionDAG &DAG) const {
6045   SDLoc DL(Op);
6046   MVT VT = Op.getSimpleValueType();
6047   SDValue Mag = Op.getOperand(0);
6048   SDValue Sign = Op.getOperand(1);
6049   assert(Mag.getValueType() == Sign.getValueType() &&
6050          "Can only handle COPYSIGN with matching types.");
6051 
6052   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6053   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
6054   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
6055 
6056   SDValue Mask, VL;
6057   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6058 
6059   SDValue CopySign =
6060       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
6061 
6062   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
6063 }
6064 
6065 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
6066     SDValue Op, SelectionDAG &DAG) const {
6067   MVT VT = Op.getSimpleValueType();
6068   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6069 
6070   MVT I1ContainerVT =
6071       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6072 
6073   SDValue CC =
6074       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6075   SDValue Op1 =
6076       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6077   SDValue Op2 =
6078       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6079 
6080   SDLoc DL(Op);
6081   SDValue Mask, VL;
6082   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6083 
6084   SDValue Select =
6085       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6086 
6087   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6088 }
6089 
6090 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6091                                                unsigned NewOpc,
6092                                                bool HasMask) const {
6093   MVT VT = Op.getSimpleValueType();
6094   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6095 
  // Create the operand list by converting existing operands to scalable types.
6097   SmallVector<SDValue, 6> Ops;
6098   for (const SDValue &V : Op->op_values()) {
6099     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6100 
6101     // Pass through non-vector operands.
6102     if (!V.getValueType().isVector()) {
6103       Ops.push_back(V);
6104       continue;
6105     }
6106 
6107     // "cast" fixed length vector to a scalable vector.
6108     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6109            "Only fixed length vectors are supported!");
6110     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6111   }
6112 
6113   SDLoc DL(Op);
6114   SDValue Mask, VL;
6115   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6116   if (HasMask)
6117     Ops.push_back(Mask);
6118   Ops.push_back(VL);
6119 
6120   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6121   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6122 }
6123 
6124 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6125 // * Operands of each node are assumed to be in the same order.
6126 // * The EVL operand is promoted from i32 to i64 on RV64.
6127 // * Fixed-length vectors are converted to their scalable-vector container
6128 //   types.
6129 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6130                                        unsigned RISCVISDOpc) const {
6131   SDLoc DL(Op);
6132   MVT VT = Op.getSimpleValueType();
6133   SmallVector<SDValue, 4> Ops;
6134 
6135   for (const auto &OpIdx : enumerate(Op->ops())) {
6136     SDValue V = OpIdx.value();
6137     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6138     // Pass through operands which aren't fixed-length vectors.
6139     if (!V.getValueType().isFixedLengthVector()) {
6140       Ops.push_back(V);
6141       continue;
6142     }
6143     // "cast" fixed length vector to a scalable vector.
6144     MVT OpVT = V.getSimpleValueType();
6145     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6146     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6147            "Only fixed length vectors are supported!");
6148     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6149   }
6150 
6151   if (!VT.isFixedLengthVector())
6152     return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
6153 
6154   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6155 
6156   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
6157 
6158   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6159 }
6160 
6161 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6162                                               SelectionDAG &DAG) const {
6163   SDLoc DL(Op);
6164   MVT VT = Op.getSimpleValueType();
6165 
6166   SDValue Src = Op.getOperand(0);
6167   // NOTE: Mask is dropped.
6168   SDValue VL = Op.getOperand(2);
6169 
6170   MVT ContainerVT = VT;
6171   if (VT.isFixedLengthVector()) {
6172     ContainerVT = getContainerForFixedLengthVector(VT);
6173     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6174     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6175   }
6176 
6177   MVT XLenVT = Subtarget.getXLenVT();
6178   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6179   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6180                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6181 
6182   SDValue SplatValue = DAG.getConstant(
6183       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
6184   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6185                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6186 
6187   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6188                                Splat, ZeroSplat, VL);
6189   if (!VT.isFixedLengthVector())
6190     return Result;
6191   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6192 }
6193 
6194 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6195                                                 SelectionDAG &DAG) const {
6196   SDLoc DL(Op);
6197   MVT VT = Op.getSimpleValueType();
6198 
6199   SDValue Op1 = Op.getOperand(0);
6200   SDValue Op2 = Op.getOperand(1);
6201   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6202   // NOTE: Mask is dropped.
6203   SDValue VL = Op.getOperand(4);
6204 
6205   MVT ContainerVT = VT;
6206   if (VT.isFixedLengthVector()) {
6207     ContainerVT = getContainerForFixedLengthVector(VT);
6208     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6209     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6210   }
6211 
6212   SDValue Result;
6213   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
6214 
6215   switch (Condition) {
6216   default:
6217     break;
6218   // X != Y  --> (X^Y)
6219   case ISD::SETNE:
6220     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6221     break;
6222   // X == Y  --> ~(X^Y)
6223   case ISD::SETEQ: {
6224     SDValue Temp =
6225         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6226     Result =
6227         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6228     break;
6229   }
6230   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6231   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6232   case ISD::SETGT:
6233   case ISD::SETULT: {
6234     SDValue Temp =
6235         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6236     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6237     break;
6238   }
6239   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6240   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6241   case ISD::SETLT:
6242   case ISD::SETUGT: {
6243     SDValue Temp =
6244         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6245     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6246     break;
6247   }
6248   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6249   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6250   case ISD::SETGE:
6251   case ISD::SETULE: {
6252     SDValue Temp =
6253         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6254     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6255     break;
6256   }
6257   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6258   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6259   case ISD::SETLE:
6260   case ISD::SETUGE: {
6261     SDValue Temp =
6262         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6263     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6264     break;
6265   }
6266   }
6267 
6268   if (!VT.isFixedLengthVector())
6269     return Result;
6270   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6271 }
6272 
6273 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6274 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6275                                                 unsigned RISCVISDOpc) const {
6276   SDLoc DL(Op);
6277 
6278   SDValue Src = Op.getOperand(0);
6279   SDValue Mask = Op.getOperand(1);
6280   SDValue VL = Op.getOperand(2);
6281 
6282   MVT DstVT = Op.getSimpleValueType();
6283   MVT SrcVT = Src.getSimpleValueType();
6284   if (DstVT.isFixedLengthVector()) {
6285     DstVT = getContainerForFixedLengthVector(DstVT);
6286     SrcVT = getContainerForFixedLengthVector(SrcVT);
6287     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6288     MVT MaskVT = getMaskTypeFor(DstVT);
6289     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6290   }
6291 
6292   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6293                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6294                                 ? RISCVISD::VSEXT_VL
6295                                 : RISCVISD::VZEXT_VL;
6296 
6297   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6298   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6299 
6300   SDValue Result;
6301   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6302     if (SrcVT.isInteger()) {
6303       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6304 
6305       // Do we need to do any pre-widening before converting?
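      // For example (illustrative): an i1 source being converted to f32 is
      // first selected to 0/1 (unsigned) or 0/-1 (signed) integers of the
      // destination element width; the FP conversion below handles the rest.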
6306       if (SrcEltSize == 1) {
6307         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6308         MVT XLenVT = Subtarget.getXLenVT();
6309         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6310         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6311                                         DAG.getUNDEF(IntVT), Zero, VL);
6312         SDValue One = DAG.getConstant(
6313             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6314         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6315                                        DAG.getUNDEF(IntVT), One, VL);
6316         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6317                           ZeroSplat, VL);
6318       } else if (DstEltSize > (2 * SrcEltSize)) {
6319         // Widen before converting.
6320         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6321                                      DstVT.getVectorElementCount());
6322         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6323       }
6324 
6325       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6326     } else {
6327       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6328              "Wrong input/output vector types");
6329 
6330       // Convert f16 to f32 then convert f32 to i64.
6331       if (DstEltSize > (2 * SrcEltSize)) {
6332         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6333         MVT InterimFVT =
6334             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6335         Src =
6336             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6337       }
6338 
6339       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6340     }
6341   } else { // Narrowing + Conversion
6342     if (SrcVT.isInteger()) {
6343       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
      // First do a narrowing convert to an FP type half the size, then round
      // the FP type to a smaller FP type if needed.
6346 
6347       MVT InterimFVT = DstVT;
6348       if (SrcEltSize > (2 * DstEltSize)) {
6349         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6350         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6351         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6352       }
6353 
6354       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6355 
6356       if (InterimFVT != DstVT) {
6357         Src = Result;
6358         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6359       }
6360     } else {
6361       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6362              "Wrong input/output vector types");
6363       // First do a narrowing conversion to an integer half the size, then
6364       // truncate if needed.
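      // For example (illustrative): an f64-to-i8 conversion first narrows to
      // i32 with the converting instruction, then truncates i32 -> i16 -> i8
      // in the loop below.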
6365 
6366       if (DstEltSize == 1) {
6367         // First convert to the same size integer, then convert to mask using
6368         // setcc.
6369         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6370         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6371                                           DstVT.getVectorElementCount());
6372         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6373 
6374         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6375         // otherwise the conversion was undefined.
6376         MVT XLenVT = Subtarget.getXLenVT();
6377         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
        SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
                                DAG.getUNDEF(InterimIVT), SplatZero, VL);
6380         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6381                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6382       } else {
6383         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6384                                           DstVT.getVectorElementCount());
6385 
6386         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6387 
6388         while (InterimIVT != DstVT) {
6389           SrcEltSize /= 2;
6390           Src = Result;
6391           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6392                                         DstVT.getVectorElementCount());
6393           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6394                                Src, Mask, VL);
6395         }
6396       }
6397     }
6398   }
6399 
6400   MVT VT = Op.getSimpleValueType();
6401   if (!VT.isFixedLengthVector())
6402     return Result;
6403   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6404 }
6405 
6406 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6407                                             unsigned MaskOpc,
6408                                             unsigned VecOpc) const {
6409   MVT VT = Op.getSimpleValueType();
6410   if (VT.getVectorElementType() != MVT::i1)
6411     return lowerVPOp(Op, DAG, VecOpc);
6412 
6413   // It is safe to drop mask parameter as masked-off elements are undef.
6414   SDValue Op1 = Op->getOperand(0);
6415   SDValue Op2 = Op->getOperand(1);
6416   SDValue VL = Op->getOperand(3);
6417 
6418   MVT ContainerVT = VT;
6419   const bool IsFixed = VT.isFixedLengthVector();
6420   if (IsFixed) {
6421     ContainerVT = getContainerForFixedLengthVector(VT);
6422     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6423     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6424   }
6425 
6426   SDLoc DL(Op);
6427   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6428   if (!IsFixed)
6429     return Val;
6430   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6431 }
6432 
6433 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
6435 // support the "unsigned unscaled" addressing mode; indices are implicitly
6436 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6437 // signed or scaled indexing is extended to the XLEN value type and scaled
6438 // accordingly.
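// For example (illustrative): a gather of i32 elements addressed by signed i16
// indices with a scale of 4 will have had its indices sign-extended to XLEN
// and multiplied by 4 before reaching this lowering, so the vluxei formed here
// consumes plain byte offsets.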
6439 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6440                                                SelectionDAG &DAG) const {
6441   SDLoc DL(Op);
6442   MVT VT = Op.getSimpleValueType();
6443 
6444   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6445   EVT MemVT = MemSD->getMemoryVT();
6446   MachineMemOperand *MMO = MemSD->getMemOperand();
6447   SDValue Chain = MemSD->getChain();
6448   SDValue BasePtr = MemSD->getBasePtr();
6449 
6450   ISD::LoadExtType LoadExtType;
6451   SDValue Index, Mask, PassThru, VL;
6452 
6453   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6454     Index = VPGN->getIndex();
6455     Mask = VPGN->getMask();
6456     PassThru = DAG.getUNDEF(VT);
6457     VL = VPGN->getVectorLength();
6458     // VP doesn't support extending loads.
6459     LoadExtType = ISD::NON_EXTLOAD;
6460   } else {
    // Else it must be an MGATHER.
6462     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6463     Index = MGN->getIndex();
6464     Mask = MGN->getMask();
6465     PassThru = MGN->getPassThru();
6466     LoadExtType = MGN->getExtensionType();
6467   }
6468 
6469   MVT IndexVT = Index.getSimpleValueType();
6470   MVT XLenVT = Subtarget.getXLenVT();
6471 
6472   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6473          "Unexpected VTs!");
6474   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6475   // Targets have to explicitly opt-in for extending vector loads.
6476   assert(LoadExtType == ISD::NON_EXTLOAD &&
6477          "Unexpected extending MGATHER/VP_GATHER");
6478   (void)LoadExtType;
6479 
6480   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6481   // the selection of the masked intrinsics doesn't do this for us.
6482   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6483 
6484   MVT ContainerVT = VT;
6485   if (VT.isFixedLengthVector()) {
6486     ContainerVT = getContainerForFixedLengthVector(VT);
6487     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6488                                ContainerVT.getVectorElementCount());
6489 
6490     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6491 
6492     if (!IsUnmasked) {
6493       MVT MaskVT = getMaskTypeFor(ContainerVT);
6494       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6495       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6496     }
6497   }
6498 
6499   if (!VL)
6500     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6501 
6502   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6503     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6504     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6505                                    VL);
6506     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6507                         TrueMask, VL);
6508   }
6509 
6510   unsigned IntID =
6511       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6512   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6513   if (IsUnmasked)
6514     Ops.push_back(DAG.getUNDEF(ContainerVT));
6515   else
6516     Ops.push_back(PassThru);
6517   Ops.push_back(BasePtr);
6518   Ops.push_back(Index);
6519   if (!IsUnmasked)
6520     Ops.push_back(Mask);
6521   Ops.push_back(VL);
6522   if (!IsUnmasked)
6523     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6524 
6525   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6526   SDValue Result =
6527       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6528   Chain = Result.getValue(1);
6529 
6530   if (VT.isFixedLengthVector())
6531     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6532 
6533   return DAG.getMergeValues({Result, Chain}, DL);
6534 }
6535 
6536 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
// matched to an RVV indexed store. The RVV indexed store instructions only
6538 // support the "unsigned unscaled" addressing mode; indices are implicitly
6539 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6540 // signed or scaled indexing is extended to the XLEN value type and scaled
6541 // accordingly.
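// The lowering mirrors lowerMaskedGather above, selecting vsoxei/vsoxei_mask
// in place of vluxei/vluxei_mask.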
6542 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6543                                                 SelectionDAG &DAG) const {
6544   SDLoc DL(Op);
6545   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6546   EVT MemVT = MemSD->getMemoryVT();
6547   MachineMemOperand *MMO = MemSD->getMemOperand();
6548   SDValue Chain = MemSD->getChain();
6549   SDValue BasePtr = MemSD->getBasePtr();
6550 
6551   bool IsTruncatingStore = false;
6552   SDValue Index, Mask, Val, VL;
6553 
6554   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6555     Index = VPSN->getIndex();
6556     Mask = VPSN->getMask();
6557     Val = VPSN->getValue();
6558     VL = VPSN->getVectorLength();
6559     // VP doesn't support truncating stores.
6560     IsTruncatingStore = false;
6561   } else {
    // Else it must be an MSCATTER.
6563     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6564     Index = MSN->getIndex();
6565     Mask = MSN->getMask();
6566     Val = MSN->getValue();
6567     IsTruncatingStore = MSN->isTruncatingStore();
6568   }
6569 
6570   MVT VT = Val.getSimpleValueType();
6571   MVT IndexVT = Index.getSimpleValueType();
6572   MVT XLenVT = Subtarget.getXLenVT();
6573 
6574   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6575          "Unexpected VTs!");
6576   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6577   // Targets have to explicitly opt-in for extending vector loads and
6578   // truncating vector stores.
6579   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6580   (void)IsTruncatingStore;
6581 
6582   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6583   // the selection of the masked intrinsics doesn't do this for us.
6584   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6585 
6586   MVT ContainerVT = VT;
6587   if (VT.isFixedLengthVector()) {
6588     ContainerVT = getContainerForFixedLengthVector(VT);
6589     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6590                                ContainerVT.getVectorElementCount());
6591 
6592     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6593     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6594 
6595     if (!IsUnmasked) {
6596       MVT MaskVT = getMaskTypeFor(ContainerVT);
6597       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6598     }
6599   }
6600 
6601   if (!VL)
6602     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6603 
6604   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6605     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6606     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6607                                    VL);
6608     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6609                         TrueMask, VL);
6610   }
6611 
6612   unsigned IntID =
6613       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6614   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6615   Ops.push_back(Val);
6616   Ops.push_back(BasePtr);
6617   Ops.push_back(Index);
6618   if (!IsUnmasked)
6619     Ops.push_back(Mask);
6620   Ops.push_back(VL);
6621 
6622   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6623                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6624 }
6625 
6626 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6627                                                SelectionDAG &DAG) const {
6628   const MVT XLenVT = Subtarget.getXLenVT();
6629   SDLoc DL(Op);
6630   SDValue Chain = Op->getOperand(0);
6631   SDValue SysRegNo = DAG.getTargetConstant(
6632       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6633   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6634   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6635 
  // The rounding mode encoding used by RISCV differs from that used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table, which consists of a sequence of 4-bit fields, each
  // holding the corresponding FLT_ROUNDS mode.
6640   static const int Table =
6641       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6642       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6643       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6644       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6645       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
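  // Worked example (illustrative, assuming the current enum encodings): the
  // table above evaluates to 0x42301, so reading FRM = RDN (2) computes
  // (0x42301 >> (2 * 4)) & 7 = 3 = RoundingMode::TowardNegative.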
6646 
6647   SDValue Shift =
6648       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6649   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6650                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6651   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6652                                DAG.getConstant(7, DL, XLenVT));
6653 
6654   return DAG.getMergeValues({Masked, Chain}, DL);
6655 }
6656 
6657 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6658                                                SelectionDAG &DAG) const {
6659   const MVT XLenVT = Subtarget.getXLenVT();
6660   SDLoc DL(Op);
6661   SDValue Chain = Op->getOperand(0);
6662   SDValue RMValue = Op->getOperand(1);
6663   SDValue SysRegNo = DAG.getTargetConstant(
6664       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6665 
  // The rounding mode encoding used by RISCV differs from that used by
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table, which consists of a sequence of 4-bit fields, each
  // holding the corresponding RISCV mode.
6670   static const unsigned Table =
6671       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6672       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6673       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6674       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6675       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
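  // Worked example (illustrative, assuming the current enum encodings): the
  // table above also evaluates to 0x42301, so writing
  // RoundingMode::TowardNegative (3) computes (0x42301 >> (3 * 4)) & 7 = 2 =
  // RISCVFPRndMode::RDN.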
6676 
6677   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6678                               DAG.getConstant(2, DL, XLenVT));
6679   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6680                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6681   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6682                         DAG.getConstant(0x7, DL, XLenVT));
6683   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6684                      RMValue);
6685 }
6686 
6687 SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
6688                                                SelectionDAG &DAG) const {
6689   MachineFunction &MF = DAG.getMachineFunction();
6690 
6691   bool isRISCV64 = Subtarget.is64Bit();
6692   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6693 
6694   int FI = MF.getFrameInfo().CreateFixedObject(isRISCV64 ? 8 : 4, 0, false);
6695   return DAG.getFrameIndex(FI, PtrVT);
6696 }
6697 
6698 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6699   switch (IntNo) {
6700   default:
6701     llvm_unreachable("Unexpected Intrinsic");
6702   case Intrinsic::riscv_bcompress:
6703     return RISCVISD::BCOMPRESSW;
6704   case Intrinsic::riscv_bdecompress:
6705     return RISCVISD::BDECOMPRESSW;
6706   case Intrinsic::riscv_bfp:
6707     return RISCVISD::BFPW;
6708   case Intrinsic::riscv_fsl:
6709     return RISCVISD::FSLW;
6710   case Intrinsic::riscv_fsr:
6711     return RISCVISD::FSRW;
6712   }
6713 }
6714 
// Converts the given intrinsic to an i64 operation with any extension.
6716 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6717                                          unsigned IntNo) {
6718   SDLoc DL(N);
6719   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6720   // Deal with the Instruction Operands
6721   SmallVector<SDValue, 3> NewOps;
6722   for (SDValue Op : drop_begin(N->ops()))
6723     // Promote the operand to i64 type
6724     NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
6725   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
6726   // ReplaceNodeResults requires we maintain the same type for the return value.
6727   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6728 }
6729 
6730 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6731 // form of the given Opcode.
6732 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6733   switch (Opcode) {
6734   default:
6735     llvm_unreachable("Unexpected opcode");
6736   case ISD::SHL:
6737     return RISCVISD::SLLW;
6738   case ISD::SRA:
6739     return RISCVISD::SRAW;
6740   case ISD::SRL:
6741     return RISCVISD::SRLW;
6742   case ISD::SDIV:
6743     return RISCVISD::DIVW;
6744   case ISD::UDIV:
6745     return RISCVISD::DIVUW;
6746   case ISD::UREM:
6747     return RISCVISD::REMUW;
6748   case ISD::ROTL:
6749     return RISCVISD::ROLW;
6750   case ISD::ROTR:
6751     return RISCVISD::RORW;
6752   }
6753 }
6754 
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
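// For example (illustrative): an i32 (rotl x, y) on RV64 becomes
//   (i32 (trunc (RISCVISD::ROLW (any_extend x), (any_extend y))))
// so that isel can still select the 32-bit rotate instruction.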
6760 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6761                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6762   SDLoc DL(N);
6763   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6764   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6765   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6766   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6767   // ReplaceNodeResults requires we maintain the same type for the return value.
6768   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6769 }
6770 
// Converts the given 32-bit operation to an i64 operation with a signed
// extension semantic to reduce the number of sign extension instructions.
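// For example (illustrative): an i32 (add x, y) becomes
//   (i32 (trunc (sext_inreg (add (any_extend x), (any_extend y)), i32)))
// which keeps the promoted value sign-extended so later sext users are free.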
6773 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6774   SDLoc DL(N);
6775   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6776   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6777   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6778   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6779                                DAG.getValueType(MVT::i32));
6780   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6781 }
6782 
6783 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6784                                              SmallVectorImpl<SDValue> &Results,
6785                                              SelectionDAG &DAG) const {
6786   SDLoc DL(N);
6787   switch (N->getOpcode()) {
6788   default:
6789     llvm_unreachable("Don't know how to custom type legalize this operation!");
6790   case ISD::STRICT_FP_TO_SINT:
6791   case ISD::STRICT_FP_TO_UINT:
6792   case ISD::FP_TO_SINT:
6793   case ISD::FP_TO_UINT: {
6794     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6795            "Unexpected custom legalisation");
6796     bool IsStrict = N->isStrictFPOpcode();
6797     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6798                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6799     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6800     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6801         TargetLowering::TypeSoftenFloat) {
6802       if (!isTypeLegal(Op0.getValueType()))
6803         return;
6804       if (IsStrict) {
6805         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6806                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6807         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6808         SDValue Res = DAG.getNode(
6809             Opc, DL, VTs, N->getOperand(0), Op0,
6810             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6811         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6812         Results.push_back(Res.getValue(1));
6813         return;
6814       }
6815       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6816       SDValue Res =
6817           DAG.getNode(Opc, DL, MVT::i64, Op0,
6818                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6819       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6820       return;
6821     }
6822     // If the FP type needs to be softened, emit a library call using the 'si'
6823     // version. If we left it to default legalization we'd end up with 'di'. If
6824     // the FP type doesn't need to be softened just let generic type
6825     // legalization promote the result type.
6826     RTLIB::Libcall LC;
6827     if (IsSigned)
6828       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6829     else
6830       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6831     MakeLibCallOptions CallOptions;
6832     EVT OpVT = Op0.getValueType();
6833     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6834     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6835     SDValue Result;
6836     std::tie(Result, Chain) =
6837         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6838     Results.push_back(Result);
6839     if (IsStrict)
6840       Results.push_back(Chain);
6841     break;
6842   }
6843   case ISD::READCYCLECOUNTER: {
6844     assert(!Subtarget.is64Bit() &&
6845            "READCYCLECOUNTER only has custom type legalization on riscv32");
6846 
6847     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6848     SDValue RCW =
6849         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6850 
6851     Results.push_back(
6852         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6853     Results.push_back(RCW.getValue(2));
6854     break;
6855   }
6856   case ISD::MUL: {
6857     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6858     unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
6860     if (Size > XLen) {
6861       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6862       SDValue LHS = N->getOperand(0);
6863       SDValue RHS = N->getOperand(1);
6864       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6865 
6866       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6867       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6868       // We need exactly one side to be unsigned.
6869       if (LHSIsU == RHSIsU)
6870         return;
6871 
6872       auto MakeMULPair = [&](SDValue S, SDValue U) {
6873         MVT XLenVT = Subtarget.getXLenVT();
6874         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6875         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6876         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6877         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6878         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6879       };
6880 
6881       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6882       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6883 
6884       // The other operand should be signed, but still prefer MULH when
6885       // possible.
6886       if (RHSIsU && LHSIsS && !RHSIsS)
6887         Results.push_back(MakeMULPair(LHS, RHS));
6888       else if (LHSIsU && RHSIsS && !LHSIsS)
6889         Results.push_back(MakeMULPair(RHS, LHS));
6890 
6891       return;
6892     }
6893     LLVM_FALLTHROUGH;
6894   }
6895   case ISD::ADD:
6896   case ISD::SUB:
6897     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6898            "Unexpected custom legalisation");
6899     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6900     break;
6901   case ISD::SHL:
6902   case ISD::SRA:
6903   case ISD::SRL:
6904     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6905            "Unexpected custom legalisation");
6906     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6907       // If we can use a BSET instruction, allow default promotion to apply.
6908       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
6909           isOneConstant(N->getOperand(0)))
6910         break;
6911       Results.push_back(customLegalizeToWOp(N, DAG));
6912       break;
6913     }
6914 
6915     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6916     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6917     // shift amount.
6918     if (N->getOpcode() == ISD::SHL) {
6919       SDLoc DL(N);
6920       SDValue NewOp0 =
6921           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6922       SDValue NewOp1 =
6923           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6924       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6925       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6926                                    DAG.getValueType(MVT::i32));
6927       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6928     }
6929 
6930     break;
6931   case ISD::ROTL:
6932   case ISD::ROTR:
6933     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6934            "Unexpected custom legalisation");
6935     Results.push_back(customLegalizeToWOp(N, DAG));
6936     break;
6937   case ISD::CTTZ:
6938   case ISD::CTTZ_ZERO_UNDEF:
6939   case ISD::CTLZ:
6940   case ISD::CTLZ_ZERO_UNDEF: {
6941     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6942            "Unexpected custom legalisation");
6943 
6944     SDValue NewOp0 =
6945         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6946     bool IsCTZ =
6947         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6948     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6949     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6950     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6951     return;
6952   }
6953   case ISD::SDIV:
6954   case ISD::UDIV:
6955   case ISD::UREM: {
6956     MVT VT = N->getSimpleValueType(0);
6957     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6958            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6959            "Unexpected custom legalisation");
    // Don't promote division/remainder by constant since we should expand
    // those to a multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
6963     if (N->getOperand(1).getOpcode() == ISD::Constant)
6964       return;
6965 
6966     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6967     // the upper 32 bits. For other types we need to sign or zero extend
6968     // based on the opcode.
6969     unsigned ExtOpc = ISD::ANY_EXTEND;
6970     if (VT != MVT::i32)
6971       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6972                                            : ISD::ZERO_EXTEND;
6973 
6974     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6975     break;
6976   }
6977   case ISD::UADDO:
6978   case ISD::USUBO: {
6979     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6980            "Unexpected custom legalisation");
6981     bool IsAdd = N->getOpcode() == ISD::UADDO;
6982     // Create an ADDW or SUBW.
6983     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6984     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6985     SDValue Res =
6986         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6987     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6988                       DAG.getValueType(MVT::i32));
6989 
6990     SDValue Overflow;
6991     if (IsAdd && isOneConstant(RHS)) {
      // Special case: uaddo X, 1 overflowed iff the addition result is 0.
      // The general case (X + C) < C is not necessarily beneficial. Although
      // we reduce the live range of X, we may introduce the materialization of
      // constant C, especially when the setcc result is used by a branch; we
      // have no compare-with-constant-and-branch instructions.
6997       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6998                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6999     } else {
7000       // Sign extend the LHS and perform an unsigned compare with the ADDW
7001       // result. Since the inputs are sign extended from i32, this is equivalent
7002       // to comparing the lower 32 bits.
7003       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
7004       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
7005                               IsAdd ? ISD::SETULT : ISD::SETUGT);
7006     }
7007 
7008     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7009     Results.push_back(Overflow);
7010     return;
7011   }
7012   case ISD::UADDSAT:
7013   case ISD::USUBSAT: {
7014     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7015            "Unexpected custom legalisation");
7016     if (Subtarget.hasStdExtZbb()) {
7017       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
7018       // sign extend allows overflow of the lower 32 bits to be detected on
7019       // the promoted size.
7020       SDValue LHS =
7021           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
7022       SDValue RHS =
7023           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
7024       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
7025       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7026       return;
7027     }
7028 
7029     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
7030     // promotion for UADDO/USUBO.
7031     Results.push_back(expandAddSubSat(N, DAG));
7032     return;
7033   }
7034   case ISD::ABS: {
7035     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7036            "Unexpected custom legalisation");
7037 
7038     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
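    // Worked example (illustrative): for X = -5, Y = sraiw(X, 31) = -1, so
    // xor(X, Y) = 4 and subw(4, Y) = 5.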
7039 
7040     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7041 
    // Freeze the source so we can increase its use count.
7043     Src = DAG.getFreeze(Src);
7044 
7045     // Copy sign bit to all bits using the sraiw pattern.
7046     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
7047                                    DAG.getValueType(MVT::i32));
7048     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
7049                            DAG.getConstant(31, DL, MVT::i64));
7050 
7051     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
7052     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
7053 
7054     // NOTE: The result is only required to be anyextended, but sext is
7055     // consistent with type legalization of sub.
7056     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
7057                          DAG.getValueType(MVT::i32));
7058     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
7059     return;
7060   }
7061   case ISD::BITCAST: {
7062     EVT VT = N->getValueType(0);
7063     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
7064     SDValue Op0 = N->getOperand(0);
7065     EVT Op0VT = Op0.getValueType();
7066     MVT XLenVT = Subtarget.getXLenVT();
7067     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
7068       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
7069       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
7070     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
7071                Subtarget.hasStdExtF()) {
7072       SDValue FPConv =
7073           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
7074       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
7075     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
7076                isTypeLegal(Op0VT)) {
7077       // Custom-legalize bitcasts from fixed-length vector types to illegal
7078       // scalar types in order to improve codegen. Bitcast the vector to a
7079       // one-element vector type whose element type is the same as the result
7080       // type, and extract the first element.
7081       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
7082       if (isTypeLegal(BVT)) {
7083         SDValue BVec = DAG.getBitcast(BVT, Op0);
7084         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
7085                                       DAG.getConstant(0, DL, XLenVT)));
7086       }
7087     }
7088     break;
7089   }
7090   case RISCVISD::GREV:
7091   case RISCVISD::GORC:
7092   case RISCVISD::SHFL: {
7093     MVT VT = N->getSimpleValueType(0);
7094     MVT XLenVT = Subtarget.getXLenVT();
7095     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
7096            "Unexpected custom legalisation");
7097     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
7098     assert((Subtarget.hasStdExtZbp() ||
7099             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
7100              N->getConstantOperandVal(1) == 7)) &&
7101            "Unexpected extension");
7102     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7103     SDValue NewOp1 =
7104         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7105     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7106     // ReplaceNodeResults requires we maintain the same type for the return
7107     // value.
7108     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7109     break;
7110   }
7111   case ISD::BSWAP:
7112   case ISD::BITREVERSE: {
7113     MVT VT = N->getSimpleValueType(0);
7114     MVT XLenVT = Subtarget.getXLenVT();
7115     assert((VT == MVT::i8 || VT == MVT::i16 ||
7116             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7117            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7118     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7119     unsigned Imm = VT.getSizeInBits() - 1;
7120     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7121     if (N->getOpcode() == ISD::BSWAP)
7122       Imm &= ~0x7U;
7123     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7124                                 DAG.getConstant(Imm, DL, XLenVT));
7125     // ReplaceNodeResults requires we maintain the same type for the return
7126     // value.
7127     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7128     break;
7129   }
7130   case ISD::FSHL:
7131   case ISD::FSHR: {
7132     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7133            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7134     SDValue NewOp0 =
7135         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7136     SDValue NewOp1 =
7137         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7138     SDValue NewShAmt =
7139         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7140     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7141     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7142     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7143                            DAG.getConstant(0x1f, DL, MVT::i64));
    // fshl and fshr concatenate their operands in the same order; the fsrw and
    // fslw instructions use different orders. fshl will return its first
    // operand for a shift of zero, fshr will return its second operand. fsl
    // and fsr both return rs1, so the ISD nodes need to have different operand
    // orders. The shift amount is in rs2.
7149     unsigned Opc = RISCVISD::FSLW;
7150     if (N->getOpcode() == ISD::FSHR) {
7151       std::swap(NewOp0, NewOp1);
7152       Opc = RISCVISD::FSRW;
7153     }
7154     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7155     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7156     break;
7157   }
7158   case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
    // first element.
7165     SDValue Vec = N->getOperand(0);
7166     SDValue Idx = N->getOperand(1);
7167 
7168     // The vector type hasn't been legalized yet so we can't issue target
7169     // specific nodes if it needs legalization.
7170     // FIXME: We would manually legalize if it's important.
7171     if (!isTypeLegal(Vec.getValueType()))
7172       return;
7173 
7174     MVT VecVT = Vec.getSimpleValueType();
7175 
7176     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7177            VecVT.getVectorElementType() == MVT::i64 &&
7178            "Unexpected EXTRACT_VECTOR_ELT legalization");
7179 
7180     // If this is a fixed vector, we need to convert it to a scalable vector.
7181     MVT ContainerVT = VecVT;
7182     if (VecVT.isFixedLengthVector()) {
7183       ContainerVT = getContainerForFixedLengthVector(VecVT);
7184       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7185     }
7186 
7187     MVT XLenVT = Subtarget.getXLenVT();
7188 
7189     // Use a VL of 1 to avoid processing more elements than we need.
7190     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7191     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
7192 
7193     // Unless the index is known to be 0, we must slide the vector down to get
7194     // the desired element into index 0.
7195     if (!isNullConstant(Idx)) {
7196       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7197                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7198     }
7199 
7200     // Extract the lower XLEN bits of the correct vector element.
7201     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7202 
7203     // To extract the upper XLEN bits of the vector element, shift the first
7204     // element right by 32 bits and re-extract the lower XLEN bits.
7205     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7206                                      DAG.getUNDEF(ContainerVT),
7207                                      DAG.getConstant(32, DL, XLenVT), VL);
7208     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7209                                  ThirtyTwoV, Mask, VL);
7210 
7211     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7212 
7213     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7214     break;
7215   }
7216   case ISD::INTRINSIC_WO_CHAIN: {
7217     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7218     switch (IntNo) {
7219     default:
7220       llvm_unreachable(
7221           "Don't know how to custom type legalize this intrinsic!");
7222     case Intrinsic::riscv_grev:
7223     case Intrinsic::riscv_gorc: {
7224       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7225              "Unexpected custom legalisation");
7226       SDValue NewOp1 =
7227           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7228       SDValue NewOp2 =
7229           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7230       unsigned Opc =
7231           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
      // If the control is a constant, promote the node by clearing any extra
      // bits in the control. isel will form greviw/gorciw if the result is
      // sign extended.
7235       if (isa<ConstantSDNode>(NewOp2)) {
7236         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7237                              DAG.getConstant(0x1f, DL, MVT::i64));
7238         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7239       }
7240       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7241       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7242       break;
7243     }
7244     case Intrinsic::riscv_bcompress:
7245     case Intrinsic::riscv_bdecompress:
7246     case Intrinsic::riscv_bfp:
7247     case Intrinsic::riscv_fsl:
7248     case Intrinsic::riscv_fsr: {
7249       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7250              "Unexpected custom legalisation");
7251       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7252       break;
7253     }
7254     case Intrinsic::riscv_orc_b: {
7255       // Lower to the GORCI encoding for orc.b with the operand extended.
7256       SDValue NewOp =
7257           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7258       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7259                                 DAG.getConstant(7, DL, MVT::i64));
7260       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7261       return;
7262     }
7263     case Intrinsic::riscv_shfl:
7264     case Intrinsic::riscv_unshfl: {
7265       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7266              "Unexpected custom legalisation");
7267       SDValue NewOp1 =
7268           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7269       SDValue NewOp2 =
7270           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7271       unsigned Opc =
7272           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7273       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7274       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7275       // will be shuffled the same way as the lower 32 bit half, but the two
7276       // halves won't cross.
7277       if (isa<ConstantSDNode>(NewOp2)) {
7278         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7279                              DAG.getConstant(0xf, DL, MVT::i64));
7280         Opc =
7281             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7282       }
7283       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7284       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7285       break;
7286     }
7287     case Intrinsic::riscv_vmv_x_s: {
7288       EVT VT = N->getValueType(0);
7289       MVT XLenVT = Subtarget.getXLenVT();
7290       if (VT.bitsLT(XLenVT)) {
7291         // Simple case just extract using vmv.x.s and truncate.
7292         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7293                                       Subtarget.getXLenVT(), N->getOperand(1));
7294         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7295         return;
7296       }
7297 
7298       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7299              "Unexpected custom legalization");
7300 
7301       // We need to do the move in two steps.
7302       SDValue Vec = N->getOperand(1);
7303       MVT VecVT = Vec.getSimpleValueType();
7304 
7305       // First extract the lower XLEN bits of the element.
7306       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7307 
7308       // To extract the upper XLEN bits of the vector element, shift the first
7309       // element right by 32 bits and re-extract the lower XLEN bits.
7310       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7311       SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
7312 
7313       SDValue ThirtyTwoV =
7314           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7315                       DAG.getConstant(32, DL, XLenVT), VL);
7316       SDValue LShr32 =
7317           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7318       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7319 
7320       Results.push_back(
7321           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7322       break;
7323     }
7324     }
7325     break;
7326   }
7327   case ISD::VECREDUCE_ADD:
7328   case ISD::VECREDUCE_AND:
7329   case ISD::VECREDUCE_OR:
7330   case ISD::VECREDUCE_XOR:
7331   case ISD::VECREDUCE_SMAX:
7332   case ISD::VECREDUCE_UMAX:
7333   case ISD::VECREDUCE_SMIN:
7334   case ISD::VECREDUCE_UMIN:
7335     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7336       Results.push_back(V);
7337     break;
7338   case ISD::VP_REDUCE_ADD:
7339   case ISD::VP_REDUCE_AND:
7340   case ISD::VP_REDUCE_OR:
7341   case ISD::VP_REDUCE_XOR:
7342   case ISD::VP_REDUCE_SMAX:
7343   case ISD::VP_REDUCE_UMAX:
7344   case ISD::VP_REDUCE_SMIN:
7345   case ISD::VP_REDUCE_UMIN:
7346     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7347       Results.push_back(V);
7348     break;
7349   case ISD::FLT_ROUNDS_: {
7350     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7351     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7352     Results.push_back(Res.getValue(0));
7353     Results.push_back(Res.getValue(1));
7354     break;
7355   }
7356   }
7357 }
7358 
7359 // A structure to hold one of the bit-manipulation patterns below. Together, a
7360 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7361 //   (or (and (shl x, 1), 0xAAAAAAAA),
7362 //       (and (srl x, 1), 0x55555555))
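// In the example above (ShAmt == 1) the SHL half supplies the odd bits and the
// SRL half the even bits; matched together they swap each adjacent pair of
// bits, i.e. they form (GREVI x, 1).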
7363 struct RISCVBitmanipPat {
7364   SDValue Op;
7365   unsigned ShAmt;
7366   bool IsSHL;
7367 
7368   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7369     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7370   }
7371 };
7372 
7373 // Matches patterns of the form
7374 //   (and (shl x, C2), (C1 << C2))
7375 //   (and (srl x, C2), C1)
7376 //   (shl (and x, C1), C2)
7377 //   (srl (and x, (C1 << C2)), C2)
7378 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7379 // The expected masks for each shift amount are specified in BitmanipMasks where
7380 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The max allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
// possible XLen is 64.
7384 static Optional<RISCVBitmanipPat>
7385 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7386   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7387          "Unexpected number of masks");
7388   Optional<uint64_t> Mask;
7389   // Optionally consume a mask around the shift operation.
7390   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7391     Mask = Op.getConstantOperandVal(1);
7392     Op = Op.getOperand(0);
7393   }
7394   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7395     return None;
7396   bool IsSHL = Op.getOpcode() == ISD::SHL;
7397 
7398   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7399     return None;
7400   uint64_t ShAmt = Op.getConstantOperandVal(1);
7401 
7402   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7403   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7404     return None;
7405   // If we don't have enough masks for 64 bit, then we must be trying to
7406   // match SHFL so we're only allowed to shift 1/4 of the width.
7407   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7408     return None;
7409 
7410   SDValue Src = Op.getOperand(0);
7411 
7412   // The expected mask is shifted left when the AND is found around SHL
7413   // patterns.
7414   //   ((x >> 1) & 0x55555555)
7415   //   ((x << 1) & 0xAAAAAAAA)
7416   bool SHLExpMask = IsSHL;
7417 
7418   if (!Mask) {
7419     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7420     // the mask is all ones: consume that now.
7421     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7422       Mask = Src.getConstantOperandVal(1);
7423       Src = Src.getOperand(0);
7424       // The expected mask is now in fact shifted left for SRL, so reverse the
7425       // decision.
7426       //   ((x & 0xAAAAAAAA) >> 1)
7427       //   ((x & 0x55555555) << 1)
7428       SHLExpMask = !SHLExpMask;
7429     } else {
7430       // Use a default shifted mask of all-ones if there's no AND, truncated
7431       // down to the expected width. This simplifies the logic later on.
7432       Mask = maskTrailingOnes<uint64_t>(Width);
7433       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7434     }
7435   }
7436 
7437   unsigned MaskIdx = Log2_32(ShAmt);
7438   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7439 
7440   if (SHLExpMask)
7441     ExpMask <<= ShAmt;
7442 
7443   if (Mask != ExpMask)
7444     return None;
7445 
7446   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7447 }
7448 
7449 // Matches any of the following bit-manipulation patterns:
7450 //   (and (shl x, 1), (0x55555555 << 1))
7451 //   (and (srl x, 1), 0x55555555)
7452 //   (shl (and x, 0x55555555), 1)
7453 //   (srl (and x, (0x55555555 << 1)), 1)
7454 // where the shift amount and mask may vary thus:
7455 //   [1]  = 0x55555555 / 0xAAAAAAAA
7456 //   [2]  = 0x33333333 / 0xCCCCCCCC
7457 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7458 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
7460 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
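// For example (illustrative):
//   (or (and (shl x, 4), 0xF0F0F0F0), (and (srl x, 4), 0x0F0F0F0F))
// swaps the two nibbles of every byte and is matched as (GREVI x, 4).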
7461 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7462   // These are the unshifted masks which we use to match bit-manipulation
7463   // patterns. They may be shifted left in certain circumstances.
7464   static const uint64_t BitmanipMasks[] = {
7465       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7466       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7467 
7468   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7469 }
7470 
7471 // Try to fold (<bop> x, (reduction.<bop> vec, start))
7472 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
7473   auto BinOpToRVVReduce = [](unsigned Opc) {
7474     switch (Opc) {
7475     default:
7476       llvm_unreachable("Unhandled binary to transfrom reduction");
7477     case ISD::ADD:
7478       return RISCVISD::VECREDUCE_ADD_VL;
7479     case ISD::UMAX:
7480       return RISCVISD::VECREDUCE_UMAX_VL;
7481     case ISD::SMAX:
7482       return RISCVISD::VECREDUCE_SMAX_VL;
7483     case ISD::UMIN:
7484       return RISCVISD::VECREDUCE_UMIN_VL;
7485     case ISD::SMIN:
7486       return RISCVISD::VECREDUCE_SMIN_VL;
7487     case ISD::AND:
7488       return RISCVISD::VECREDUCE_AND_VL;
7489     case ISD::OR:
7490       return RISCVISD::VECREDUCE_OR_VL;
7491     case ISD::XOR:
7492       return RISCVISD::VECREDUCE_XOR_VL;
7493     case ISD::FADD:
7494       return RISCVISD::VECREDUCE_FADD_VL;
7495     case ISD::FMAXNUM:
7496       return RISCVISD::VECREDUCE_FMAX_VL;
7497     case ISD::FMINNUM:
7498       return RISCVISD::VECREDUCE_FMIN_VL;
7499     }
7500   };
7501 
7502   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
7503     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7504            isNullConstant(V.getOperand(1)) &&
7505            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
7506   };
7507 
7508   unsigned Opc = N->getOpcode();
7509   unsigned ReduceIdx;
7510   if (IsReduction(N->getOperand(0), Opc))
7511     ReduceIdx = 0;
7512   else if (IsReduction(N->getOperand(1), Opc))
7513     ReduceIdx = 1;
7514   else
7515     return SDValue();
7516 
  // Skip if this is an FADD whose flags disallow the reassociation this
  // combine requires.
7518   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
7519     return SDValue();
7520 
7521   SDValue Extract = N->getOperand(ReduceIdx);
7522   SDValue Reduce = Extract.getOperand(0);
7523   if (!Reduce.hasOneUse())
7524     return SDValue();
7525 
7526   SDValue ScalarV = Reduce.getOperand(2);
7527 
7528   // Make sure that ScalarV is a splat with VL=1.
7529   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
7530       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
7531       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
7532     return SDValue();
7533 
7534   if (!isOneConstant(ScalarV.getOperand(2)))
7535     return SDValue();
7536 
  // TODO: Deal with values other than the neutral element.
7538   auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
7539     if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
7540         isNullFPConstant(V))
7541       return true;
7542     return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
7543                                  N->getFlags()) == V;
7544   };
7545 
  // Check that the scalar operand of ScalarV is the neutral element.
7547   if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
7548     return SDValue();
7549 
7550   if (!ScalarV.hasOneUse())
7551     return SDValue();
7552 
7553   EVT SplatVT = ScalarV.getValueType();
7554   SDValue NewStart = N->getOperand(1 - ReduceIdx);
7555   unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
7556   if (SplatVT.isInteger()) {
7557     auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
7558     if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
7559       SplatOpc = RISCVISD::VMV_S_X_VL;
7560     else
7561       SplatOpc = RISCVISD::VMV_V_X_VL;
7562   }
7563 
7564   SDValue NewScalarV =
7565       DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
7566                   ScalarV.getOperand(2));
7567   SDValue NewReduce =
7568       DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
7569                   Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
7570                   Reduce.getOperand(3), Reduce.getOperand(4));
7571   return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
7572                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
7573 }
7574 
7575 // Match the following pattern as a GREVI(W) operation
7576 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7577 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7578                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7580   EVT VT = Op.getValueType();
7581 
7582   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7583     auto LHS = matchGREVIPat(Op.getOperand(0));
7584     auto RHS = matchGREVIPat(Op.getOperand(1));
7585     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7586       SDLoc DL(Op);
7587       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7588                          DAG.getConstant(LHS->ShAmt, DL, VT));
7589     }
7590   }
7591   return SDValue();
7592 }
7593 
// Matches any of the following patterns as a GORCI(W) operation
7595 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7596 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7597 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7598 // Note that with the variant of 3.,
7599 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7600 // the inner pattern will first be matched as GREVI and then the outer
7601 // pattern will be matched to GORC via the first rule above.
7602 // 4.  (or (rotl/rotr x, bitwidth/2), x)
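// For example, on RV32 (or (rotl x, 16), x) becomes (GORC x, 16): a rotate
// by half the bit width swaps the two halves, which is the same permutation
// as (GREV x, 16), so it merges with the OR just like rule 1.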
7603 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7604                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7606   EVT VT = Op.getValueType();
7607 
7608   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7609     SDLoc DL(Op);
7610     SDValue Op0 = Op.getOperand(0);
7611     SDValue Op1 = Op.getOperand(1);
7612 
7613     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7614       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7615           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7616           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7617         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7618       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7619       if ((Reverse.getOpcode() == ISD::ROTL ||
7620            Reverse.getOpcode() == ISD::ROTR) &&
7621           Reverse.getOperand(0) == X &&
7622           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7623         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7624         if (RotAmt == (VT.getSizeInBits() / 2))
7625           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7626                              DAG.getConstant(RotAmt, DL, VT));
7627       }
7628       return SDValue();
7629     };
7630 
7631     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7632     if (SDValue V = MatchOROfReverse(Op0, Op1))
7633       return V;
7634     if (SDValue V = MatchOROfReverse(Op1, Op0))
7635       return V;
7636 
7637     // OR is commutable so canonicalize its OR operand to the left
7638     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7639       std::swap(Op0, Op1);
7640     if (Op0.getOpcode() != ISD::OR)
7641       return SDValue();
7642     SDValue OrOp0 = Op0.getOperand(0);
7643     SDValue OrOp1 = Op0.getOperand(1);
7644     auto LHS = matchGREVIPat(OrOp0);
7645     // OR is commutable so swap the operands and try again: x might have been
7646     // on the left
7647     if (!LHS) {
7648       std::swap(OrOp0, OrOp1);
7649       LHS = matchGREVIPat(OrOp0);
7650     }
7651     auto RHS = matchGREVIPat(Op1);
7652     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7653       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7654                          DAG.getConstant(LHS->ShAmt, DL, VT));
7655     }
7656   }
7657   return SDValue();
7658 }
7659 
7660 // Matches any of the following bit-manipulation patterns:
7661 //   (and (shl x, 1), (0x22222222 << 1))
7662 //   (and (srl x, 1), 0x22222222)
7663 //   (shl (and x, 0x22222222), 1)
7664 //   (srl (and x, (0x22222222 << 1)), 1)
7665 // where the shift amount and mask may vary thus:
7666 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
7668 //   [4]  = 0x00F000F0 / 0x0F000F00
7669 //   [8]  = 0x0000FF00 / 0x00FF0000
7670 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7671 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7672   // These are the unshifted masks which we use to match bit-manipulation
7673   // patterns. They may be shifted left in certain circumstances.
7674   static const uint64_t BitmanipMasks[] = {
7675       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7676       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7677 
7678   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7679 }
7680 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
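// For example, with shift amount 8 on RV32 the matched pattern is
//   (or (or (shl (and x, 0x0000FF00), 8),
//           (srl (and x, 0x00FF0000), 8)),
//       (and x, 0xFF0000FF))
// which swaps the two middle bytes of x and combines to (SHFL x, 8).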
7682 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7683                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7685   EVT VT = Op.getValueType();
7686 
7687   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7688     return SDValue();
7689 
7690   SDValue Op0 = Op.getOperand(0);
7691   SDValue Op1 = Op.getOperand(1);
7692 
  // OR is commutable, so canonicalize the inner OR to Op0.
7694   if (Op0.getOpcode() != ISD::OR)
7695     std::swap(Op0, Op1);
7696   if (Op0.getOpcode() != ISD::OR)
7697     return SDValue();
7698 
7699   // We found an inner OR, so our operands are the operands of the inner OR
7700   // and the other operand of the outer OR.
7701   SDValue A = Op0.getOperand(0);
7702   SDValue B = Op0.getOperand(1);
7703   SDValue C = Op1;
7704 
7705   auto Match1 = matchSHFLPat(A);
7706   auto Match2 = matchSHFLPat(B);
7707 
7708   // If neither matched, we failed.
7709   if (!Match1 && !Match2)
7710     return SDValue();
7711 
  // We had at least one match. If one failed, try the remaining C operand.
7713   if (!Match1) {
7714     std::swap(A, C);
7715     Match1 = matchSHFLPat(A);
7716     if (!Match1)
7717       return SDValue();
7718   } else if (!Match2) {
7719     std::swap(B, C);
7720     Match2 = matchSHFLPat(B);
7721     if (!Match2)
7722       return SDValue();
7723   }
7724   assert(Match1 && Match2);
7725 
7726   // Make sure our matches pair up.
7727   if (!Match1->formsPairWith(*Match2))
7728     return SDValue();
7729 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
7732   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7733       C.getOperand(0) != Match1->Op)
7734     return SDValue();
7735 
7736   uint64_t Mask = C.getConstantOperandVal(1);
7737 
7738   static const uint64_t BitmanipMasks[] = {
7739       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7740       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7741   };
7742 
7743   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7744   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7745   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7746 
7747   if (Mask != ExpMask)
7748     return SDValue();
7749 
7750   SDLoc DL(Op);
7751   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7752                      DAG.getConstant(Match1->ShAmt, DL, VT));
7753 }
7754 
// Optimize (add (shl x, c0), (shl y, c1)) ->
//          (SLLI (SH*ADD y, x), c0), if c1 - c0 equals 1, 2 or 3 (assuming
//          c0 < c1; the commuted form is handled the same way).
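// For example, (add (shl x, 5), (shl y, 6)) is rewritten to
// (shl (add (shl y, 1), x), 5), which can be selected as
//   sh1add t, y, x
//   slli   t, t, 5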
7757 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7758                                   const RISCVSubtarget &Subtarget) {
  // Perform this optimization only if the Zba extension is enabled.
7760   if (!Subtarget.hasStdExtZba())
7761     return SDValue();
7762 
7763   // Skip for vector types and larger types.
7764   EVT VT = N->getValueType(0);
7765   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7766     return SDValue();
7767 
7768   // The two operand nodes must be SHL and have no other use.
7769   SDValue N0 = N->getOperand(0);
7770   SDValue N1 = N->getOperand(1);
7771   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7772       !N0->hasOneUse() || !N1->hasOneUse())
7773     return SDValue();
7774 
7775   // Check c0 and c1.
7776   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7777   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7778   if (!N0C || !N1C)
7779     return SDValue();
7780   int64_t C0 = N0C->getSExtValue();
7781   int64_t C1 = N1C->getSExtValue();
7782   if (C0 <= 0 || C1 <= 0)
7783     return SDValue();
7784 
7785   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7786   int64_t Bits = std::min(C0, C1);
7787   int64_t Diff = std::abs(C0 - C1);
7788   if (Diff != 1 && Diff != 2 && Diff != 3)
7789     return SDValue();
7790 
7791   // Build nodes.
7792   SDLoc DL(N);
7793   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7794   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7795   SDValue NA0 =
7796       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7797   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7798   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7799 }
7800 
7801 // Combine
7802 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7803 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7804 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7805 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7806 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7807 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
// These GREV patterns represent BSWAP.
// FIXME: This can be generalized to any GREV. We just need to toggle the MSB
// of the GREV shift amount.
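// For example, on RV32 (GREV x, 24) is BSWAP, and a rotate by 16 is the
// same permutation as (GREV x, 16), so the pair combines to
// (GREV x, 24 ^ 16) = (GREV x, 8).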
7811 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7812                                           const RISCVSubtarget &Subtarget) {
7813   bool IsWInstruction =
7814       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7815   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7816           IsWInstruction) &&
7817          "Unexpected opcode!");
7818   SDValue Src = N->getOperand(0);
7819   EVT VT = N->getValueType(0);
7820   SDLoc DL(N);
7821 
7822   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7823     return SDValue();
7824 
7825   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7826       !isa<ConstantSDNode>(Src.getOperand(1)))
7827     return SDValue();
7828 
7829   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7830   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7831 
  // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
  // RORW/ROLW, and the GREV should be the encoding of BSWAP for this width.
7834   unsigned ShAmt1 = N->getConstantOperandVal(1);
7835   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7836   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7837     return SDValue();
7838 
7839   Src = Src.getOperand(0);
7840 
  // The rotate amount is half the bit width, so XORing it in toggles the MSB
  // of the GREV shift amount.
7842   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7843   if (CombinedShAmt == 0)
7844     return Src;
7845 
7846   SDValue Res = DAG.getNode(
7847       RISCVISD::GREV, DL, VT, Src,
7848       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7849   if (!IsWInstruction)
7850     return Res;
7851 
7852   // Sign extend the result to match the behavior of the rotate. This will be
7853   // selected to GREVIW in isel.
7854   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7855                      DAG.getValueType(MVT::i32));
7856 }
7857 
7858 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7859 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage
// does not undo itself, but it is redundant.
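// For example, (GREV (GREV x, 2), 3) -> (GREV x, 1) since 2 ^ 3 == 1, and
// (GORC (GORC x, 1), 2) -> (GORC x, 3) since 1 | 2 == 3.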
7862 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7863   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7864   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7865   SDValue Src = N->getOperand(0);
7866 
7867   if (Src.getOpcode() != N->getOpcode())
7868     return SDValue();
7869 
7870   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7871       !isa<ConstantSDNode>(Src.getOperand(1)))
7872     return SDValue();
7873 
7874   unsigned ShAmt1 = N->getConstantOperandVal(1);
7875   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7876   Src = Src.getOperand(0);
7877 
7878   unsigned CombinedShAmt;
7879   if (IsGORC)
7880     CombinedShAmt = ShAmt1 | ShAmt2;
7881   else
7882     CombinedShAmt = ShAmt1 ^ ShAmt2;
7883 
7884   if (CombinedShAmt == 0)
7885     return Src;
7886 
7887   SDLoc DL(N);
7888   return DAG.getNode(
7889       N->getOpcode(), DL, N->getValueType(0), Src,
7890       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7891 }
7892 
7893 // Combine a constant select operand into its use:
7894 //
7895 // (and (select cond, -1, c), x)
7896 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7897 // (or  (select cond, 0, c), x)
7898 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7899 // (xor (select cond, 0, c), x)
7900 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7901 // (add (select cond, 0, c), x)
7902 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7903 // (sub x, (select cond, 0, c))
7904 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7905 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7906                                    SelectionDAG &DAG, bool AllOnes) {
7907   EVT VT = N->getValueType(0);
7908 
7909   // Skip vectors.
7910   if (VT.isVector())
7911     return SDValue();
7912 
7913   if ((Slct.getOpcode() != ISD::SELECT &&
7914        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7915       !Slct.hasOneUse())
7916     return SDValue();
7917 
7918   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7919     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7920   };
7921 
7922   bool SwapSelectOps;
7923   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7924   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7925   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7926   SDValue NonConstantVal;
7927   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7928     SwapSelectOps = false;
7929     NonConstantVal = FalseVal;
7930   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7931     SwapSelectOps = true;
7932     NonConstantVal = TrueVal;
7933   } else
7934     return SDValue();
7935 
  // Slct is now known to be the desired identity constant when CC is true.
7937   TrueVal = OtherOp;
7938   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7939   // Unless SwapSelectOps says the condition should be false.
7940   if (SwapSelectOps)
7941     std::swap(TrueVal, FalseVal);
7942 
7943   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7944     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7945                        {Slct.getOperand(0), Slct.getOperand(1),
7946                         Slct.getOperand(2), TrueVal, FalseVal});
7947 
7948   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7949                      {Slct.getOperand(0), TrueVal, FalseVal});
7950 }
7951 
7952 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7953 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7954                                               bool AllOnes) {
7955   SDValue N0 = N->getOperand(0);
7956   SDValue N1 = N->getOperand(1);
7957   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7958     return Result;
7959   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7960     return Result;
7961   return SDValue();
7962 }
7963 
7964 // Transform (add (mul x, c0), c1) ->
7965 //           (add (mul (add x, c1/c0), c0), c1%c0).
7966 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7967 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7968 // to an infinite loop in DAGCombine if transformed.
7969 // Or transform (add (mul x, c0), c1) ->
7970 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7971 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7972 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7973 // lead to an infinite loop in DAGCombine if transformed.
7974 // Or transform (add (mul x, c0), c1) ->
7975 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7976 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7977 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7978 // lead to an infinite loop in DAGCombine if transformed.
7979 // Or transform (add (mul x, c0), c1) ->
7980 //              (mul (add x, c1/c0), c0).
7981 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
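// For example, with c0 = 100 and c1 = 8990 (not simm12): c1/c0 = 89 and
// c1%c0 = 90 are both simm12, and c0*(c1/c0) = 8900 is not, so the first
// rule rewrites (add (mul x, 100), 8990) into
// (add (mul (add x, 89), 100), 90).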
7982 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7983                                      const RISCVSubtarget &Subtarget) {
7984   // Skip for vector types and larger types.
7985   EVT VT = N->getValueType(0);
7986   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7987     return SDValue();
  // The first operand node must be a MUL and have no other use.
7989   SDValue N0 = N->getOperand(0);
7990   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7991     return SDValue();
  // Check whether c0 and c1 satisfy the conditions above.
7993   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7994   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7995   if (!N0C || !N1C)
7996     return SDValue();
7997   // If N0C has multiple uses it's possible one of the cases in
7998   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7999   // in an infinite loop.
8000   if (!N0C->hasOneUse())
8001     return SDValue();
8002   int64_t C0 = N0C->getSExtValue();
8003   int64_t C1 = N1C->getSExtValue();
8004   int64_t CA, CB;
8005   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
8006     return SDValue();
  // Search for a proper CA (non-zero) and CB such that both are simm12.
8008   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
8009       !isInt<12>(C0 * (C1 / C0))) {
8010     CA = C1 / C0;
8011     CB = C1 % C0;
8012   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
8013              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
8014     CA = C1 / C0 + 1;
8015     CB = C1 % C0 - C0;
8016   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
8017              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
8018     CA = C1 / C0 - 1;
8019     CB = C1 % C0 + C0;
8020   } else
8021     return SDValue();
8022   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
8023   SDLoc DL(N);
8024   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
8025                              DAG.getConstant(CA, DL, VT));
8026   SDValue New1 =
8027       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
8028   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
8029 }
8030 
8031 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
8032                                  const RISCVSubtarget &Subtarget) {
8033   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
8034     return V;
8035   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
8036     return V;
8037   if (SDValue V = combineBinOpToReduce(N, DAG))
8038     return V;
8039   // fold (add (select lhs, rhs, cc, 0, y), x) ->
8040   //      (select lhs, rhs, cc, x, (add x, y))
8041   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8042 }
8043 
8044 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
8045   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
8046   //      (select lhs, rhs, cc, x, (sub x, y))
8047   SDValue N0 = N->getOperand(0);
8048   SDValue N1 = N->getOperand(1);
8049   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
8050 }
8051 
8052 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
8053                                  const RISCVSubtarget &Subtarget) {
8054   SDValue N0 = N->getOperand(0);
8055   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
8056   // extending X. This is safe since we only need the LSB after the shift and
8057   // shift amounts larger than 31 would produce poison. If we wait until
8058   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
8059   // to use a BEXT instruction.
8060   if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
8061       N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
8062       N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
8063       N0.hasOneUse()) {
8064     SDLoc DL(N);
8065     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
8066     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
8067     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
8068     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
8069                               DAG.getConstant(1, DL, MVT::i64));
8070     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
8071   }
8072 
8073   if (SDValue V = combineBinOpToReduce(N, DAG))
8074     return V;
8075 
8076   // fold (and (select lhs, rhs, cc, -1, y), x) ->
8077   //      (select lhs, rhs, cc, x, (and x, y))
8078   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
8079 }
8080 
8081 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
8082                                 const RISCVSubtarget &Subtarget) {
8083   if (Subtarget.hasStdExtZbp()) {
8084     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
8085       return GREV;
8086     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
8087       return GORC;
8088     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
8089       return SHFL;
8090   }
8091 
8092   if (SDValue V = combineBinOpToReduce(N, DAG))
8093     return V;
8094   // fold (or (select cond, 0, y), x) ->
8095   //      (select cond, x, (or x, y))
8096   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8097 }
8098 
8099 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
8100   SDValue N0 = N->getOperand(0);
8101   SDValue N1 = N->getOperand(1);
8102 
8103   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
8104   // NOTE: Assumes ROL being legal means ROLW is legal.
8105   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8106   if (N0.getOpcode() == RISCVISD::SLLW &&
8107       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
8108       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
8109     SDLoc DL(N);
8110     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
8111                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
8112   }
8113 
8114   if (SDValue V = combineBinOpToReduce(N, DAG))
8115     return V;
8116   // fold (xor (select cond, 0, y), x) ->
8117   //      (select cond, x, (xor x, y))
8118   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8119 }
8120 
8121 static SDValue
8122 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
8123                                 const RISCVSubtarget &Subtarget) {
8124   SDValue Src = N->getOperand(0);
8125   EVT VT = N->getValueType(0);
8126 
8127   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8128   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8129       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8130     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8131                        Src.getOperand(0));
8132 
8133   // Fold (i64 (sext_inreg (abs X), i32)) ->
8134   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8135   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8136   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8137   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8138   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8139   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8140   // may get combined into an earlier operation so we need to use
8141   // ComputeNumSignBits.
8142   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8143   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8144   // we can't assume that X has 33 sign bits. We must check.
8145   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8146       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8147       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8148       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8149     SDLoc DL(N);
8150     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8151     SDValue Neg =
8152         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8153     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8154                       DAG.getValueType(MVT::i32));
8155     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8156   }
8157 
8158   return SDValue();
8159 }
8160 
8161 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8162 // vwadd(u).vv/vx or vwsub(u).vv/vx.
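// For example, if x has type nxv2i64 and y has type nxv2i32:
//   (add_vl x, (vsext_vl y, mask, vl), mask, vl)
//     -> (vwadd_w_vl x, y, mask, vl)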
8163 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8164                                              bool Commute = false) {
8165   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8166           N->getOpcode() == RISCVISD::SUB_VL) &&
8167          "Unexpected opcode");
8168   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8169   SDValue Op0 = N->getOperand(0);
8170   SDValue Op1 = N->getOperand(1);
8171   if (Commute)
8172     std::swap(Op0, Op1);
8173 
8174   MVT VT = N->getSimpleValueType(0);
8175 
8176   // Determine the narrow size for a widening add/sub.
8177   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8178   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8179                                   VT.getVectorElementCount());
8180 
8181   SDValue Mask = N->getOperand(2);
8182   SDValue VL = N->getOperand(3);
8183 
8184   SDLoc DL(N);
8185 
8186   // If the RHS is a sext or zext, we can form a widening op.
8187   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8188        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8189       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8190     unsigned ExtOpc = Op1.getOpcode();
8191     Op1 = Op1.getOperand(0);
8192     // Re-introduce narrower extends if needed.
8193     if (Op1.getValueType() != NarrowVT)
8194       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8195 
8196     unsigned WOpc;
8197     if (ExtOpc == RISCVISD::VSEXT_VL)
8198       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8199     else
8200       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8201 
8202     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8203   }
8204 
8205   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8206   // sext/zext?
8207 
8208   return SDValue();
8209 }
8210 
8211 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8212 // vwsub(u).vv/vx.
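// For example, if the wide operand is really an extended narrow value:
//   (vwadd_w_vl (vsext_vl x, mask, vl), y, mask, vl)
//     -> (vwadd_vl x, y, mask, vl)
// where x is narrowed to the same element size as y.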
8213 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8214   SDValue Op0 = N->getOperand(0);
8215   SDValue Op1 = N->getOperand(1);
8216   SDValue Mask = N->getOperand(2);
8217   SDValue VL = N->getOperand(3);
8218 
8219   MVT VT = N->getSimpleValueType(0);
8220   MVT NarrowVT = Op1.getSimpleValueType();
8221   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8222 
8223   unsigned VOpc;
8224   switch (N->getOpcode()) {
8225   default: llvm_unreachable("Unexpected opcode");
8226   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8227   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8228   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8229   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8230   }
8231 
8232   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8233                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8234 
8235   SDLoc DL(N);
8236 
8237   // If the LHS is a sext or zext, we can narrow this op to the same size as
8238   // the RHS.
8239   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8240        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8241       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8242     unsigned ExtOpc = Op0.getOpcode();
8243     Op0 = Op0.getOperand(0);
8244     // Re-introduce narrower extends if needed.
8245     if (Op0.getValueType() != NarrowVT)
8246       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8247     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8248   }
8249 
8250   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8251                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8252 
8253   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8254   // to commute and use a vwadd(u).vx instead.
8255   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8256       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8257     Op0 = Op0.getOperand(1);
8258 
    // See if we have enough sign bits or zero bits in the scalar to use a
    // widening add/sub by splatting to a smaller element size.
8261     unsigned EltBits = VT.getScalarSizeInBits();
8262     unsigned ScalarBits = Op0.getValueSizeInBits();
8263     // Make sure we're getting all element bits from the scalar register.
8264     // FIXME: Support implicit sign extension of vmv.v.x?
8265     if (ScalarBits < EltBits)
8266       return SDValue();
8267 
8268     if (IsSigned) {
8269       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8270         return SDValue();
8271     } else {
8272       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8273       if (!DAG.MaskedValueIsZero(Op0, Mask))
8274         return SDValue();
8275     }
8276 
8277     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8278                       DAG.getUNDEF(NarrowVT), Op0, VL);
8279     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8280   }
8281 
8282   return SDValue();
8283 }
8284 
8285 // Try to form VWMUL, VWMULU or VWMULSU.
8286 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
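// For example, with both sources extended from half-width elements:
//   (mul_vl (vsext_vl x), (vsext_vl y)) -> (vwmul_vl x, y)
//   (mul_vl (vzext_vl x), (vzext_vl y)) -> (vwmulu_vl x, y)
//   (mul_vl (vsext_vl x), (vzext_vl y)) -> (vwmulsu_vl x, y)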
8287 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8288                                        bool Commute) {
8289   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8290   SDValue Op0 = N->getOperand(0);
8291   SDValue Op1 = N->getOperand(1);
8292   if (Commute)
8293     std::swap(Op0, Op1);
8294 
8295   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8296   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8297   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8298   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8299     return SDValue();
8300 
8301   SDValue Mask = N->getOperand(2);
8302   SDValue VL = N->getOperand(3);
8303 
8304   // Make sure the mask and VL match.
8305   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8306     return SDValue();
8307 
8308   MVT VT = N->getSimpleValueType(0);
8309 
8310   // Determine the narrow size for a widening multiply.
8311   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8312   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8313                                   VT.getVectorElementCount());
8314 
8315   SDLoc DL(N);
8316 
  // See if the other operand has the same opcode.
8318   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8319     if (!Op1.hasOneUse())
8320       return SDValue();
8321 
8322     // Make sure the mask and VL match.
8323     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8324       return SDValue();
8325 
8326     Op1 = Op1.getOperand(0);
8327   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8328     // The operand is a splat of a scalar.
8329 
    // The passthru must be undef for a tail-agnostic operation.
8331     if (!Op1.getOperand(0).isUndef())
8332       return SDValue();
8333     // The VL must be the same.
8334     if (Op1.getOperand(2) != VL)
8335       return SDValue();
8336 
8337     // Get the scalar value.
8338     Op1 = Op1.getOperand(1);
8339 
    // See if we have enough sign bits or zero bits in the scalar to use a
    // widening multiply by splatting to a smaller element size.
8342     unsigned EltBits = VT.getScalarSizeInBits();
8343     unsigned ScalarBits = Op1.getValueSizeInBits();
8344     // Make sure we're getting all element bits from the scalar register.
8345     // FIXME: Support implicit sign extension of vmv.v.x?
8346     if (ScalarBits < EltBits)
8347       return SDValue();
8348 
8349     // If the LHS is a sign extend, try to use vwmul.
8350     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8351       // Can use vwmul.
8352     } else {
8353       // Otherwise try to use vwmulu or vwmulsu.
8354       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8355       if (DAG.MaskedValueIsZero(Op1, Mask))
8356         IsVWMULSU = IsSignExt;
8357       else
8358         return SDValue();
8359     }
8360 
8361     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8362                       DAG.getUNDEF(NarrowVT), Op1, VL);
8363   } else
8364     return SDValue();
8365 
8366   Op0 = Op0.getOperand(0);
8367 
8368   // Re-introduce narrower extends if needed.
8369   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8370   if (Op0.getValueType() != NarrowVT)
8371     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8372   // vwmulsu requires second operand to be zero extended.
8373   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8374   if (Op1.getValueType() != NarrowVT)
8375     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8376 
8377   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8378   if (!IsVWMULSU)
8379     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8380   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8381 }
8382 
8383 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8384   switch (Op.getOpcode()) {
8385   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8386   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8387   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8388   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8389   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8390   }
8391 
8392   return RISCVFPRndMode::Invalid;
8393 }
8394 
8395 // Fold
8396 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8397 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8398 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8399 //   (fp_to_int (fceil X))      -> fcvt X, rup
8400 //   (fp_to_int (fround X))     -> fcvt X, rmm
8401 static SDValue performFP_TO_INTCombine(SDNode *N,
8402                                        TargetLowering::DAGCombinerInfo &DCI,
8403                                        const RISCVSubtarget &Subtarget) {
8404   SelectionDAG &DAG = DCI.DAG;
8405   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8406   MVT XLenVT = Subtarget.getXLenVT();
8407 
8408   // Only handle XLen or i32 types. Other types narrower than XLen will
8409   // eventually be legalized to XLenVT.
8410   EVT VT = N->getValueType(0);
8411   if (VT != MVT::i32 && VT != XLenVT)
8412     return SDValue();
8413 
8414   SDValue Src = N->getOperand(0);
8415 
8416   // Ensure the FP type is also legal.
8417   if (!TLI.isTypeLegal(Src.getValueType()))
8418     return SDValue();
8419 
8420   // Don't do this for f16 with Zfhmin and not Zfh.
8421   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8422     return SDValue();
8423 
8424   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8425   if (FRM == RISCVFPRndMode::Invalid)
8426     return SDValue();
8427 
8428   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8429 
8430   unsigned Opc;
8431   if (VT == XLenVT)
8432     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8433   else
8434     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8435 
8436   SDLoc DL(N);
8437   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8438                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8439   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8440 }
8441 
8442 // Fold
8443 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8444 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8445 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8446 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8447 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8448 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8449                                        TargetLowering::DAGCombinerInfo &DCI,
8450                                        const RISCVSubtarget &Subtarget) {
8451   SelectionDAG &DAG = DCI.DAG;
8452   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8453   MVT XLenVT = Subtarget.getXLenVT();
8454 
8455   // Only handle XLen types. Other types narrower than XLen will eventually be
8456   // legalized to XLenVT.
8457   EVT DstVT = N->getValueType(0);
8458   if (DstVT != XLenVT)
8459     return SDValue();
8460 
8461   SDValue Src = N->getOperand(0);
8462 
8463   // Ensure the FP type is also legal.
8464   if (!TLI.isTypeLegal(Src.getValueType()))
8465     return SDValue();
8466 
8467   // Don't do this for f16 with Zfhmin and not Zfh.
8468   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8469     return SDValue();
8470 
8471   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8472 
8473   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8474   if (FRM == RISCVFPRndMode::Invalid)
8475     return SDValue();
8476 
8477   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8478 
8479   unsigned Opc;
8480   if (SatVT == DstVT)
8481     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8482   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8483     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8484   else
8485     return SDValue();
8486   // FIXME: Support other SatVTs by clamping before or after the conversion.
8487 
8488   Src = Src.getOperand(0);
8489 
8490   SDLoc DL(N);
8491   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8492                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8493 
8494   // RISCV FP-to-int conversions saturate to the destination register size, but
8495   // don't produce 0 for nan.
8496   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8497   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8498 }
8499 
8500 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8501 // smaller than XLenVT.
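// GREV stages compose by XORing their shift amounts: on an iN type, BSWAP is
// (GREV x, N-8) and BITREVERSE is (GREV x, N-1), so the composition is
// (GREV x, (N-8)^(N-1)) = (GREV x, 7) for any power-of-2 N >= 16, i.e. a bit
// reversal within each byte (the BREV8 encoding).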
8502 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8503                                         const RISCVSubtarget &Subtarget) {
8504   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8505 
8506   SDValue Src = N->getOperand(0);
8507   if (Src.getOpcode() != ISD::BSWAP)
8508     return SDValue();
8509 
8510   EVT VT = N->getValueType(0);
8511   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8512       !isPowerOf2_32(VT.getSizeInBits()))
8513     return SDValue();
8514 
8515   SDLoc DL(N);
8516   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8517                      DAG.getConstant(7, DL, VT));
8518 }
8519 
8520 // Convert from one FMA opcode to another based on whether we are negating the
8521 // multiply result and/or the accumulator.
8522 // NOTE: Only supports RVV operations with VL.
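// For example, VFMADD_VL computes (a * b) + c. Negating the multiply result
// gives -(a * b) + c, which is VFNMSUB_VL; negating the accumulator instead
// gives (a * b) - c, which is VFMSUB_VL.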
8523 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
8524   assert((NegMul || NegAcc) && "Not negating anything?");
8525 
8526   // Negating the multiply result changes ADD<->SUB and toggles 'N'.
8527   if (NegMul) {
8528     // clang-format off
8529     switch (Opcode) {
8530     default: llvm_unreachable("Unexpected opcode");
8531     case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFNMSUB_VL; break;
8532     case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFMADD_VL;  break;
8533     case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFMSUB_VL;  break;
8534     case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFNMADD_VL; break;
8535     }
8536     // clang-format on
8537   }
8538 
8539   // Negating the accumulator changes ADD<->SUB.
8540   if (NegAcc) {
8541     // clang-format off
8542     switch (Opcode) {
8543     default: llvm_unreachable("Unexpected opcode");
8544     case RISCVISD::VFMADD_VL:  Opcode = RISCVISD::VFMSUB_VL;  break;
8545     case RISCVISD::VFMSUB_VL:  Opcode = RISCVISD::VFMADD_VL;  break;
8546     case RISCVISD::VFNMADD_VL: Opcode = RISCVISD::VFNMSUB_VL; break;
8547     case RISCVISD::VFNMSUB_VL: Opcode = RISCVISD::VFNMADD_VL; break;
8548     }
8549     // clang-format on
8550   }
8551 
8552   return Opcode;
8553 }
8554 
8555 static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
8556                                  const RISCVSubtarget &Subtarget) {
8557   assert(N->getOpcode() == ISD::SRA && "Unexpected opcode");
8558 
8559   if (N->getValueType(0) != MVT::i64 || !Subtarget.is64Bit())
8560     return SDValue();
8561 
8562   if (!isa<ConstantSDNode>(N->getOperand(1)))
8563     return SDValue();
8564   uint64_t ShAmt = N->getConstantOperandVal(1);
8565   if (ShAmt > 32)
8566     return SDValue();
8567 
8568   SDValue N0 = N->getOperand(0);
8569 
8570   // Combine (sra (sext_inreg (shl X, C1), i32), C2) ->
8571   // (sra (shl X, C1+32), C2+32) so it gets selected as SLLI+SRAI instead of
8572   // SLLIW+SRAIW. SLLI+SRAI have compressed forms.
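  // For example, (sra (sext_inreg (shl X, 5), i32), 3) becomes
  // (sra (shl X, 37), 35); both forms take the sign from bit 26 of X and
  // shift each surviving bit of X left by 2.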
8573   if (ShAmt < 32 &&
8574       N0.getOpcode() == ISD::SIGN_EXTEND_INREG && N0.hasOneUse() &&
8575       cast<VTSDNode>(N0.getOperand(1))->getVT() == MVT::i32 &&
8576       N0.getOperand(0).getOpcode() == ISD::SHL && N0.getOperand(0).hasOneUse() &&
8577       isa<ConstantSDNode>(N0.getOperand(0).getOperand(1))) {
8578     uint64_t LShAmt = N0.getOperand(0).getConstantOperandVal(1);
8579     if (LShAmt < 32) {
8580       SDLoc ShlDL(N0.getOperand(0));
8581       SDValue Shl = DAG.getNode(ISD::SHL, ShlDL, MVT::i64,
8582                                 N0.getOperand(0).getOperand(0),
8583                                 DAG.getConstant(LShAmt + 32, ShlDL, MVT::i64));
8584       SDLoc DL(N);
8585       return DAG.getNode(ISD::SRA, DL, MVT::i64, Shl,
8586                          DAG.getConstant(ShAmt + 32, DL, MVT::i64));
8587     }
8588   }
8589 
8590   // Combine (sra (shl X, 32), 32 - C) -> (shl (sext_inreg X, i32), C)
8591   // FIXME: Should this be a generic combine? There's a similar combine on X86.
8592   //
  // Also try these folds where an add or sub is in the middle.
  // (sra (add (shl X, 32), C1), 32 - C)
  //   -> (shl (sext_inreg (add X, C1 >> 32), i32), C)
  // (sra (sub C1, (shl X, 32)), 32 - C)
  //   -> (shl (sext_inreg (sub (C1 >> 32), X), i32), C)
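  // For example, with C1 = 0x500000000 (at least 32 trailing zeros):
  //   (sra (add (shl X, 32), 0x500000000), 24)
  //     -> (shl (sext_inreg (add X, 5), i32), 8)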
8596   SDValue Shl;
8597   ConstantSDNode *AddC = nullptr;
8598 
8599   // We might have an ADD or SUB between the SRA and SHL.
8600   bool IsAdd = N0.getOpcode() == ISD::ADD;
  if (IsAdd || N0.getOpcode() == ISD::SUB) {
8602     if (!N0.hasOneUse())
8603       return SDValue();
8604     // Other operand needs to be a constant we can modify.
8605     AddC = dyn_cast<ConstantSDNode>(N0.getOperand(IsAdd ? 1 : 0));
8606     if (!AddC)
8607       return SDValue();
8608 
8609     // AddC needs to have at least 32 trailing zeros.
8610     if (AddC->getAPIntValue().countTrailingZeros() < 32)
8611       return SDValue();
8612 
8613     Shl = N0.getOperand(IsAdd ? 0 : 1);
8614   } else {
8615     // Not an ADD or SUB.
8616     Shl = N0;
8617   }
8618 
8619   // Look for a shift left by 32.
8620   if (Shl.getOpcode() != ISD::SHL || !Shl.hasOneUse() ||
8621       !isa<ConstantSDNode>(Shl.getOperand(1)) ||
8622       Shl.getConstantOperandVal(1) != 32)
8623     return SDValue();
8624 
8625   SDLoc DL(N);
8626   SDValue In = Shl.getOperand(0);
8627 
8628   // If we looked through an ADD or SUB, we need to rebuild it with the shifted
8629   // constant.
8630   if (AddC) {
8631     SDValue ShiftedAddC =
8632         DAG.getConstant(AddC->getAPIntValue().lshr(32), DL, MVT::i64);
8633     if (IsAdd)
8634       In = DAG.getNode(ISD::ADD, DL, MVT::i64, In, ShiftedAddC);
8635     else
8636       In = DAG.getNode(ISD::SUB, DL, MVT::i64, ShiftedAddC, In);
8637   }
8638 
8639   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, In,
8640                              DAG.getValueType(MVT::i32));
8641   if (ShAmt == 32)
8642     return SExt;
8643 
8644   return DAG.getNode(
8645       ISD::SHL, DL, MVT::i64, SExt,
8646       DAG.getConstant(32 - ShAmt, DL, MVT::i64));
8647 }
8648 
8649 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8650                                                DAGCombinerInfo &DCI) const {
8651   SelectionDAG &DAG = DCI.DAG;
8652 
8653   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8654   // bits are demanded. N will be added to the Worklist if it was not deleted.
8655   // Caller should return SDValue(N, 0) if this returns true.
8656   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8657     SDValue Op = N->getOperand(OpNo);
8658     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8659     if (!SimplifyDemandedBits(Op, Mask, DCI))
8660       return false;
8661 
8662     if (N->getOpcode() != ISD::DELETED_NODE)
8663       DCI.AddToWorklist(N);
8664     return true;
8665   };
8666 
8667   switch (N->getOpcode()) {
8668   default:
8669     break;
8670   case RISCVISD::SplitF64: {
8671     SDValue Op0 = N->getOperand(0);
8672     // If the input to SplitF64 is just BuildPairF64 then the operation is
8673     // redundant. Instead, use BuildPairF64's operands directly.
8674     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8675       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8676 
8677     if (Op0->isUndef()) {
8678       SDValue Lo = DAG.getUNDEF(MVT::i32);
8679       SDValue Hi = DAG.getUNDEF(MVT::i32);
8680       return DCI.CombineTo(N, Lo, Hi);
8681     }
8682 
8683     SDLoc DL(N);
8684 
8685     // It's cheaper to materialise two 32-bit integers than to load a double
8686     // from the constant pool and transfer it to integer registers through the
8687     // stack.
8688     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8689       APInt V = C->getValueAPF().bitcastToAPInt();
8690       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8691       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8692       return DCI.CombineTo(N, Lo, Hi);
8693     }
8694 
8695     // This is a target-specific version of a DAGCombine performed in
8696     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8697     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8698     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8699     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8700         !Op0.getNode()->hasOneUse())
8701       break;
8702     SDValue NewSplitF64 =
8703         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8704                     Op0.getOperand(0));
8705     SDValue Lo = NewSplitF64.getValue(0);
8706     SDValue Hi = NewSplitF64.getValue(1);
8707     APInt SignBit = APInt::getSignMask(32);
8708     if (Op0.getOpcode() == ISD::FNEG) {
8709       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8710                                   DAG.getConstant(SignBit, DL, MVT::i32));
8711       return DCI.CombineTo(N, Lo, NewHi);
8712     }
8713     assert(Op0.getOpcode() == ISD::FABS);
8714     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8715                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8716     return DCI.CombineTo(N, Lo, NewHi);
8717   }
8718   case RISCVISD::SLLW:
8719   case RISCVISD::SRAW:
8720   case RISCVISD::SRLW: {
8721     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8722     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8723         SimplifyDemandedLowBitsHelper(1, 5))
8724       return SDValue(N, 0);
8725 
8726     break;
8727   }
8728   case ISD::ROTR:
8729   case ISD::ROTL:
8730   case RISCVISD::RORW:
8731   case RISCVISD::ROLW: {
8732     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8733       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8734       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8735           SimplifyDemandedLowBitsHelper(1, 5))
8736         return SDValue(N, 0);
8737     }
8738 
8739     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8740   }
8741   case RISCVISD::CLZW:
8742   case RISCVISD::CTZW: {
8743     // Only the lower 32 bits of the first operand are read
8744     if (SimplifyDemandedLowBitsHelper(0, 32))
8745       return SDValue(N, 0);
8746     break;
8747   }
8748   case RISCVISD::GREV:
8749   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
8751     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8752     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8753     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8754       return SDValue(N, 0);
8755 
8756     return combineGREVI_GORCI(N, DAG);
8757   }
8758   case RISCVISD::GREVW:
8759   case RISCVISD::GORCW: {
8760     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8761     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8762         SimplifyDemandedLowBitsHelper(1, 5))
8763       return SDValue(N, 0);
8764 
8765     break;
8766   }
8767   case RISCVISD::SHFL:
8768   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
8770     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8771     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8772     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8773       return SDValue(N, 0);
8774 
8775     break;
8776   }
8777   case RISCVISD::SHFLW:
8778   case RISCVISD::UNSHFLW: {
8779     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8780     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8781         SimplifyDemandedLowBitsHelper(1, 4))
8782       return SDValue(N, 0);
8783 
8784     break;
8785   }
8786   case RISCVISD::BCOMPRESSW:
8787   case RISCVISD::BDECOMPRESSW: {
8788     // Only the lower 32 bits of LHS and RHS are read.
8789     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8790         SimplifyDemandedLowBitsHelper(1, 32))
8791       return SDValue(N, 0);
8792 
8793     break;
8794   }
8795   case RISCVISD::FSR:
8796   case RISCVISD::FSL:
8797   case RISCVISD::FSRW:
8798   case RISCVISD::FSLW: {
8799     bool IsWInstruction =
8800         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8801     unsigned BitWidth =
8802         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8803     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
8805     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8806       return SDValue(N, 0);
8807 
8808     break;
8809   }
8810   case RISCVISD::FMV_X_ANYEXTH:
8811   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8812     SDLoc DL(N);
8813     SDValue Op0 = N->getOperand(0);
8814     MVT VT = N->getSimpleValueType(0);
8815     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8816     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8817     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8818     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8819          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8820         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8821          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8822       assert(Op0.getOperand(0).getValueType() == VT &&
8823              "Unexpected value type!");
8824       return Op0.getOperand(0);
8825     }
8826 
8827     // This is a target-specific version of a DAGCombine performed in
8828     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8829     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8830     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8831     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8832         !Op0.getNode()->hasOneUse())
8833       break;
8834     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8835     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8836     APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
8837     if (Op0.getOpcode() == ISD::FNEG)
8838       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8839                          DAG.getConstant(SignBit, DL, VT));
8840 
8841     assert(Op0.getOpcode() == ISD::FABS);
8842     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8843                        DAG.getConstant(~SignBit, DL, VT));
8844   }
8845   case ISD::ADD:
8846     return performADDCombine(N, DAG, Subtarget);
8847   case ISD::SUB:
8848     return performSUBCombine(N, DAG);
8849   case ISD::AND:
8850     return performANDCombine(N, DAG, Subtarget);
8851   case ISD::OR:
8852     return performORCombine(N, DAG, Subtarget);
8853   case ISD::XOR:
8854     return performXORCombine(N, DAG);
8855   case ISD::FADD:
8856   case ISD::UMAX:
8857   case ISD::UMIN:
8858   case ISD::SMAX:
8859   case ISD::SMIN:
8860   case ISD::FMAXNUM:
8861   case ISD::FMINNUM:
8862     return combineBinOpToReduce(N, DAG);
8863   case ISD::SIGN_EXTEND_INREG:
8864     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8865   case ISD::ZERO_EXTEND:
8866     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8867     // type legalization. This is safe because fp_to_uint produces poison if
8868     // it overflows.
8869     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8870       SDValue Src = N->getOperand(0);
8871       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8872           isTypeLegal(Src.getOperand(0).getValueType()))
8873         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8874                            Src.getOperand(0));
8875       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8876           isTypeLegal(Src.getOperand(1).getValueType())) {
8877         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8878         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8879                                   Src.getOperand(0), Src.getOperand(1));
8880         DCI.CombineTo(N, Res);
8881         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8882         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8883         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8884       }
8885     }
8886     return SDValue();
8887   case RISCVISD::SELECT_CC: {
    // Try several folds that simplify or canonicalize SELECT_CC nodes.
8889     SDValue LHS = N->getOperand(0);
8890     SDValue RHS = N->getOperand(1);
8891     SDValue TrueV = N->getOperand(3);
8892     SDValue FalseV = N->getOperand(4);
8893 
8894     // If the True and False values are the same, we don't need a select_cc.
8895     if (TrueV == FalseV)
8896       return TrueV;
8897 
8898     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8899     if (!ISD::isIntEqualitySetCC(CCVal))
8900       break;
8901 
8902     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8903     //      (select_cc X, Y, lt, trueV, falseV)
8904     // Sometimes the setcc is introduced after select_cc has been formed.
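    // For the eq-0 form the condition is inverted first, e.g. (illustrative)
    // (select_cc (setlt X, Y), 0, eq, T, F) -> (select_cc X, Y, ge, T, F).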
8905     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8906         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8907       // If we're looking for eq 0 instead of ne 0, we need to invert the
8908       // condition.
8909       bool Invert = CCVal == ISD::SETEQ;
8910       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8911       if (Invert)
8912         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8913 
8914       SDLoc DL(N);
8915       RHS = LHS.getOperand(1);
8916       LHS = LHS.getOperand(0);
8917       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8918 
8919       SDValue TargetCC = DAG.getCondCode(CCVal);
8920       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8921                          {LHS, RHS, TargetCC, TrueV, FalseV});
8922     }
8923 
8924     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8925     //      (select_cc X, Y, eq/ne, trueV, falseV)
8926     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8927       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8928                          {LHS.getOperand(0), LHS.getOperand(1),
8929                           N->getOperand(2), TrueV, FalseV});
8930     // (select_cc X, 1, setne, trueV, falseV) ->
8931     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8932     // This can occur when legalizing some floating point comparisons.
8933     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8934     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8935       SDLoc DL(N);
8936       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8937       SDValue TargetCC = DAG.getCondCode(CCVal);
8938       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8939       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8940                          {LHS, RHS, TargetCC, TrueV, FalseV});
8941     }
8942 
8943     break;
8944   }
8945   case RISCVISD::BR_CC: {
8946     SDValue LHS = N->getOperand(1);
8947     SDValue RHS = N->getOperand(2);
8948     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8949     if (!ISD::isIntEqualitySetCC(CCVal))
8950       break;
8951 
8952     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8953     //      (br_cc X, Y, lt, dest)
8954     // Sometimes the setcc is introduced after br_cc has been formed.
8955     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8956         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8957       // If we're looking for eq 0 instead of ne 0, we need to invert the
8958       // condition.
8959       bool Invert = CCVal == ISD::SETEQ;
8960       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8961       if (Invert)
8962         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8963 
8964       SDLoc DL(N);
8965       RHS = LHS.getOperand(1);
8966       LHS = LHS.getOperand(0);
8967       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8968 
8969       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8970                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8971                          N->getOperand(4));
8972     }
8973 
8974     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
8976     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8977       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8978                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8979                          N->getOperand(3), N->getOperand(4));
8980 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
8983     // This can occur when legalizing some floating point comparisons.
8984     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8985     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8986       SDLoc DL(N);
8987       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8988       SDValue TargetCC = DAG.getCondCode(CCVal);
8989       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8990       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8991                          N->getOperand(0), LHS, RHS, TargetCC,
8992                          N->getOperand(4));
8993     }
8994     break;
8995   }
8996   case ISD::BITREVERSE:
8997     return performBITREVERSECombine(N, DAG, Subtarget);
8998   case ISD::FP_TO_SINT:
8999   case ISD::FP_TO_UINT:
9000     return performFP_TO_INTCombine(N, DCI, Subtarget);
9001   case ISD::FP_TO_SINT_SAT:
9002   case ISD::FP_TO_UINT_SAT:
9003     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
9004   case ISD::FCOPYSIGN: {
9005     EVT VT = N->getValueType(0);
9006     if (!VT.isVector())
9007       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up past the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the input is an
    // FP_ROUND with TRUNC=1.
9012     SDValue In2 = N->getOperand(1);
9013     // Avoid cases where the extend/round has multiple uses, as duplicating
9014     // those is typically more expensive than removing a fneg.
9015     if (!In2.hasOneUse())
9016       break;
9017     if (In2.getOpcode() != ISD::FP_EXTEND &&
9018         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
9019       break;
9020     In2 = In2.getOperand(0);
9021     if (In2.getOpcode() != ISD::FNEG)
9022       break;
9023     SDLoc DL(N);
9024     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
9025     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
9026                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
9027   }
9028   case ISD::MGATHER:
9029   case ISD::MSCATTER:
9030   case ISD::VP_GATHER:
9031   case ISD::VP_SCATTER: {
9032     if (!DCI.isBeforeLegalize())
9033       break;
9034     SDValue Index, ScaleOp;
9035     bool IsIndexScaled = false;
9036     bool IsIndexSigned = false;
9037     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
9038       Index = VPGSN->getIndex();
9039       ScaleOp = VPGSN->getScale();
9040       IsIndexScaled = VPGSN->isIndexScaled();
9041       IsIndexSigned = VPGSN->isIndexSigned();
9042     } else {
9043       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
9044       Index = MGSN->getIndex();
9045       ScaleOp = MGSN->getScale();
9046       IsIndexScaled = MGSN->isIndexScaled();
9047       IsIndexSigned = MGSN->isIndexSigned();
9048     }
9049     EVT IndexVT = Index.getValueType();
9050     MVT XLenVT = Subtarget.getXLenVT();
    // RISC-V indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
9053     bool NeedsIdxLegalization =
9054         IsIndexScaled ||
9055         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
9056     if (!NeedsIdxLegalization)
9057       break;
9058 
9059     SDLoc DL(N);
9060 
9061     // Any index legalization should first promote to XLenVT, so we don't lose
9062     // bits when scaling. This may create an illegal index type so we let
9063     // LLVM's legalization take care of the splitting.
9064     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
9065     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
9066       IndexVT = IndexVT.changeVectorElementType(XLenVT);
9067       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
9068                           DL, IndexVT, Index);
9069     }
9070 
9071     if (IsIndexScaled) {
9072       // Manually scale the indices.
9073       // TODO: Sanitize the scale operand here?
9074       // TODO: For VP nodes, should we use VP_SHL here?
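      // For example (illustrative), a gather of i32 elements with Scale == 4
      // becomes Index' = (shl Index, 2) and the scale operand is reset to 1.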
9075       unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
      assert(isPowerOf2_32(Scale) && "Expecting a power-of-two scale");
9077       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
9078       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
9079       ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
9080     }
9081 
9082     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
9083     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
9084       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
9085                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
9086                               ScaleOp, VPGN->getMask(),
9087                               VPGN->getVectorLength()},
9088                              VPGN->getMemOperand(), NewIndexTy);
9089     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
9090       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
9091                               {VPSN->getChain(), VPSN->getValue(),
9092                                VPSN->getBasePtr(), Index, ScaleOp,
9093                                VPSN->getMask(), VPSN->getVectorLength()},
9094                               VPSN->getMemOperand(), NewIndexTy);
9095     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
9096       return DAG.getMaskedGather(
9097           N->getVTList(), MGN->getMemoryVT(), DL,
9098           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
9099            MGN->getBasePtr(), Index, ScaleOp},
9100           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
9101     const auto *MSN = cast<MaskedScatterSDNode>(N);
9102     return DAG.getMaskedScatter(
9103         N->getVTList(), MSN->getMemoryVT(), DL,
9104         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
9105          Index, ScaleOp},
9106         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
9107   }
9108   case RISCVISD::SRA_VL:
9109   case RISCVISD::SRL_VL:
9110   case RISCVISD::SHL_VL: {
9111     SDValue ShAmt = N->getOperand(1);
9112     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
9113       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
9114       SDLoc DL(N);
9115       SDValue VL = N->getOperand(3);
9116       EVT VT = N->getValueType(0);
9117       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
9118                           ShAmt.getOperand(1), VL);
9119       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
9120                          N->getOperand(2), N->getOperand(3));
9121     }
9122     break;
9123   }
9124   case ISD::SRA:
9125     if (SDValue V = performSRACombine(N, DAG, Subtarget))
9126       return V;
9127     LLVM_FALLTHROUGH;
9128   case ISD::SRL:
9129   case ISD::SHL: {
9130     SDValue ShAmt = N->getOperand(1);
9131     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
9132       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
9133       SDLoc DL(N);
9134       EVT VT = N->getValueType(0);
9135       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
9136                           ShAmt.getOperand(1),
9137                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
9138       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
9139     }
9140     break;
9141   }
9142   case RISCVISD::ADD_VL:
9143     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
9144       return V;
9145     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
9146   case RISCVISD::SUB_VL:
9147     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
9148   case RISCVISD::VWADD_W_VL:
9149   case RISCVISD::VWADDU_W_VL:
9150   case RISCVISD::VWSUB_W_VL:
9151   case RISCVISD::VWSUBU_W_VL:
9152     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
9153   case RISCVISD::MUL_VL:
9154     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
9155       return V;
9156     // Mul is commutative.
9157     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
9158   case RISCVISD::VFMADD_VL:
9159   case RISCVISD::VFNMADD_VL:
9160   case RISCVISD::VFMSUB_VL:
9161   case RISCVISD::VFNMSUB_VL: {
9162     // Fold FNEG_VL into FMA opcodes.
9163     SDValue A = N->getOperand(0);
9164     SDValue B = N->getOperand(1);
9165     SDValue C = N->getOperand(2);
9166     SDValue Mask = N->getOperand(3);
9167     SDValue VL = N->getOperand(4);
9168 
9169     auto invertIfNegative = [&Mask, &VL](SDValue &V) {
9170       if (V.getOpcode() == RISCVISD::FNEG_VL && V.getOperand(1) == Mask &&
9171           V.getOperand(2) == VL) {
9172         // Return the negated input.
9173         V = V.getOperand(0);
9174         return true;
9175       }
9176 
9177       return false;
9178     };
9179 
9180     bool NegA = invertIfNegative(A);
9181     bool NegB = invertIfNegative(B);
9182     bool NegC = invertIfNegative(C);
9183 
9184     // If no operands are negated, we're done.
9185     if (!NegA && !NegB && !NegC)
9186       return SDValue();
9187 
9188     unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
9189     return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), A, B, C, Mask,
9190                        VL);
9191   }
9192   case ISD::STORE: {
9193     auto *Store = cast<StoreSDNode>(N);
9194     SDValue Val = Store->getValue();
9195     // Combine store of vmv.x.s to vse with VL of 1.
9196     // FIXME: Support FP.
9197     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
9198       SDValue Src = Val.getOperand(0);
9199       MVT VecVT = Src.getSimpleValueType();
9200       EVT MemVT = Store->getMemoryVT();
9201       // The memory VT and the element type must match.
9202       if (MemVT == VecVT.getVectorElementType()) {
9203         SDLoc DL(N);
9204         MVT MaskVT = getMaskTypeFor(VecVT);
9205         return DAG.getStoreVP(
9206             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
9207             DAG.getConstant(1, DL, MaskVT),
9208             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
9209             Store->getMemOperand(), Store->getAddressingMode(),
9210             Store->isTruncatingStore(), /*IsCompress*/ false);
9211       }
9212     }
9213 
9214     break;
9215   }
9216   case ISD::SPLAT_VECTOR: {
9217     EVT VT = N->getValueType(0);
9218     // Only perform this combine on legal MVT types.
9219     if (!isTypeLegal(VT))
9220       break;
9221     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
9222                                          DAG, Subtarget))
9223       return Gather;
9224     break;
9225   }
9226   case RISCVISD::VMV_V_X_VL: {
9227     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
9228     // scalar input.
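    // For example (illustrative), splatting an i64 scalar into a vector with
    // i32 elements only demands the low 32 bits of the scalar.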
9229     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
9230     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
9231     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
9232       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
9233         return SDValue(N, 0);
9234 
9235     break;
9236   }
9237   case ISD::INTRINSIC_WO_CHAIN: {
9238     unsigned IntNo = N->getConstantOperandVal(0);
9239     switch (IntNo) {
    // By default we do not combine any intrinsic.
    default:
9242       return SDValue();
9243     case Intrinsic::riscv_vcpop:
9244     case Intrinsic::riscv_vcpop_mask:
9245     case Intrinsic::riscv_vfirst:
9246     case Intrinsic::riscv_vfirst_mask: {
9247       SDValue VL = N->getOperand(2);
9248       if (IntNo == Intrinsic::riscv_vcpop_mask ||
9249           IntNo == Intrinsic::riscv_vfirst_mask)
9250         VL = N->getOperand(3);
9251       if (!isNullConstant(VL))
9252         return SDValue();
9253       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
9254       SDLoc DL(N);
9255       EVT VT = N->getValueType(0);
9256       if (IntNo == Intrinsic::riscv_vfirst ||
9257           IntNo == Intrinsic::riscv_vfirst_mask)
9258         return DAG.getConstant(-1, DL, VT);
9259       return DAG.getConstant(0, DL, VT);
9260     }
9261     }
9262   }
9263   case ISD::BITCAST: {
9264     assert(Subtarget.useRVVForFixedLengthVectors());
9265     SDValue N0 = N->getOperand(0);
9266     EVT VT = N->getValueType(0);
9267     EVT SrcVT = N0.getValueType();
    // If this is a bitcast between an MVT::v4i1/v2i1/v1i1 and an illegal
    // integer type, widen both sides to avoid a trip through memory.
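    // For example (illustrative), a bitcast from v2i1 to i2 becomes: concat
    // the v2i1 with undefs to form v8i1, bitcast that to i8, then truncate
    // the i8 result to i2.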
9270     if ((SrcVT == MVT::v1i1 || SrcVT == MVT::v2i1 || SrcVT == MVT::v4i1) &&
9271         VT.isScalarInteger()) {
9272       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
9273       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
9274       Ops[0] = N0;
9275       SDLoc DL(N);
9276       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i1, Ops);
9277       N0 = DAG.getBitcast(MVT::i8, N0);
9278       return DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
9279     }
9280 
9281     return SDValue();
9282   }
9283   }
9284 
9285   return SDValue();
9286 }
9287 
9288 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
9289     const SDNode *N, CombineLevel Level) const {
9290   assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
9291           N->getOpcode() == ISD::SRL) &&
9292          "Expected shift op");
9293 
9294   // The following folds are only desirable if `(OP _, c1 << c2)` can be
9295   // materialised in fewer instructions than `(OP _, c1)`:
9296   //
9297   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
9298   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
9299   SDValue N0 = N->getOperand(0);
9300   EVT Ty = N0.getValueType();
9301   if (Ty.isScalarInteger() &&
9302       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
9303     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
9304     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
9305     if (C1 && C2) {
9306       const APInt &C1Int = C1->getAPIntValue();
9307       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
9308 
9309       // We can materialise `c1 << c2` into an add immediate, so it's "free",
9310       // and the combine should happen, to potentially allow further combines
9311       // later.
9312       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
9313           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
9314         return true;
9315 
9316       // We can materialise `c1` in an add immediate, so it's "free", and the
9317       // combine should be prevented.
9318       if (C1Int.getMinSignedBits() <= 64 &&
9319           isLegalAddImmediate(C1Int.getSExtValue()))
9320         return false;
9321 
9322       // Neither constant will fit into an immediate, so find materialisation
9323       // costs.
9324       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9325                                               Subtarget.getFeatureBits(),
9326                                               /*CompressionCost*/true);
9327       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9328           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9329           /*CompressionCost*/true);
9330 
9331       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9332       // combine should be prevented.
9333       if (C1Cost < ShiftedC1Cost)
9334         return false;
9335     }
9336   }
9337   return true;
9338 }
9339 
9340 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9341     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9342     TargetLoweringOpt &TLO) const {
9343   // Delay this optimization as late as possible.
9344   if (!TLO.LegalOps)
9345     return false;
9346 
9347   EVT VT = Op.getValueType();
9348   if (VT.isVector())
9349     return false;
9350 
  // Only handle AND, OR and XOR for now.
9352   unsigned Opcode = Op.getOpcode();
9353   if (Opcode != ISD::AND && Opcode != ISD::OR && Opcode != ISD::XOR)
9354     return false;
9355 
9356   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9357   if (!C)
9358     return false;
9359 
9360   const APInt &Mask = C->getAPIntValue();
9361 
9362   // Clear all non-demanded bits initially.
9363   APInt ShrunkMask = Mask & DemandedBits;
9364 
9365   // Try to make a smaller immediate by setting undemanded bits.
9366 
9367   APInt ExpandedMask = Mask | ~DemandedBits;
9368 
9369   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9370     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9371   };
9372   auto UseMask = [Mask, Op, &TLO](const APInt &NewMask) -> bool {
9373     if (NewMask == Mask)
9374       return true;
9375     SDLoc DL(Op);
9376     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, Op.getValueType());
9377     SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
9378                                     Op.getOperand(0), NewC);
9379     return TLO.CombineTo(Op, NewOp);
9380   };
9381 
9382   // If the shrunk mask fits in sign extended 12 bits, let the target
9383   // independent code apply it.
9384   if (ShrunkMask.isSignedIntN(12))
9385     return false;
9386 
  // AND has a few special cases for zero-extension patterns.
9388   if (Opcode == ISD::AND) {
9389     // Preserve (and X, 0xffff) when zext.h is supported.
9390     if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9391       APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9392       if (IsLegalMask(NewMask))
9393         return UseMask(NewMask);
9394     }
9395 
9396     // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9397     if (VT == MVT::i64) {
9398       APInt NewMask = APInt(64, 0xffffffff);
9399       if (IsLegalMask(NewMask))
9400         return UseMask(NewMask);
9401     }
9402   }
9403 
9404   // For the remaining optimizations, we need to be able to make a negative
9405   // number through a combination of mask and undemanded bits.
9406   if (!ExpandedMask.isNegative())
9407     return false;
9408 
  // Find the fewest number of bits needed to represent the negative number.
9410   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9411 
  // Try to make a 12-bit negative immediate. If that fails, try to make a
  // 32-bit negative immediate unless the shrunk immediate already fits in 32
  // bits. If we can't create a simm12, we shouldn't change opaque constants.
9415   APInt NewMask = ShrunkMask;
9416   if (MinSignedBits <= 12)
9417     NewMask.setBitsFrom(11);
  else if (!C->isOpaque() && MinSignedBits <= 32 &&
           !ShrunkMask.isSignedIntN(32))
9419     NewMask.setBitsFrom(31);
9420   else
9421     return false;
9422 
9423   // Check that our new mask is a subset of the demanded mask.
9424   assert(IsLegalMask(NewMask));
9425   return UseMask(NewMask);
9426 }
9427 
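// Evaluate a GREV or GORC operation on a constant, one butterfly stage at a
// time. For example (illustrative), on RV64 a GREV shift amount of 56
// (0b111000) enables the 8-, 16- and 32-bit stages and performs a full byte
// reverse (rev8).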
9428 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9429   static const uint64_t GREVMasks[] = {
9430       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9431       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9432 
9433   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9434     unsigned Shift = 1 << Stage;
9435     if (ShAmt & Shift) {
9436       uint64_t Mask = GREVMasks[Stage];
9437       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9438       if (IsGORC)
9439         Res |= x;
9440       x = Res;
9441     }
9442   }
9443 
9444   return x;
9445 }
9446 
9447 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9448                                                         KnownBits &Known,
9449                                                         const APInt &DemandedElts,
9450                                                         const SelectionDAG &DAG,
9451                                                         unsigned Depth) const {
9452   unsigned BitWidth = Known.getBitWidth();
9453   unsigned Opc = Op.getOpcode();
9454   assert((Opc >= ISD::BUILTIN_OP_END ||
9455           Opc == ISD::INTRINSIC_WO_CHAIN ||
9456           Opc == ISD::INTRINSIC_W_CHAIN ||
9457           Opc == ISD::INTRINSIC_VOID) &&
9458          "Should use MaskedValueIsZero if you don't know whether Op"
9459          " is a target node!");
9460 
9461   Known.resetAll();
9462   switch (Opc) {
9463   default: break;
9464   case RISCVISD::SELECT_CC: {
9465     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9466     // If we don't know any bits, early out.
9467     if (Known.isUnknown())
9468       break;
9469     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9470 
9471     // Only known if known in both the LHS and RHS.
9472     Known = KnownBits::commonBits(Known, Known2);
9473     break;
9474   }
9475   case RISCVISD::REMUW: {
9476     KnownBits Known2;
9477     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9478     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9479     // We only care about the lower 32 bits.
9480     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9481     // Restore the original width by sign extending.
9482     Known = Known.sext(BitWidth);
9483     break;
9484   }
9485   case RISCVISD::DIVUW: {
9486     KnownBits Known2;
9487     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9488     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9489     // We only care about the lower 32 bits.
9490     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9491     // Restore the original width by sign extending.
9492     Known = Known.sext(BitWidth);
9493     break;
9494   }
9495   case RISCVISD::CTZW: {
9496     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9497     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9498     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9499     Known.Zero.setBitsFrom(LowBits);
9500     break;
9501   }
9502   case RISCVISD::CLZW: {
9503     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9504     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9505     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9506     Known.Zero.setBitsFrom(LowBits);
9507     break;
9508   }
9509   case RISCVISD::GREV:
9510   case RISCVISD::GORC: {
9511     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9512       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9513       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9514       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
9515       // To compute zeros, we need to invert the value and invert it back after.
9516       Known.Zero =
9517           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9518       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9519     }
9520     break;
9521   }
9522   case RISCVISD::READ_VLENB: {
9523     // We can use the minimum and maximum VLEN values to bound VLENB.  We
9524     // know VLEN must be a power of two.
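    // For example (illustrative), a guaranteed VLEN range of [128, 512] bits
    // gives VLENB in [16, 64], so bits 0-3 and bits 7 and above of the result
    // are known zero.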
9525     const unsigned MinVLenB = Subtarget.getRealMinVLen() / 8;
9526     const unsigned MaxVLenB = Subtarget.getRealMaxVLen() / 8;
9527     assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
9528     Known.Zero.setLowBits(Log2_32(MinVLenB));
9529     Known.Zero.setBitsFrom(Log2_32(MaxVLenB)+1);
9530     if (MaxVLenB == MinVLenB)
9531       Known.One.setBit(Log2_32(MinVLenB));
9532     break;
9533   }
9534   case ISD::INTRINSIC_W_CHAIN:
9535   case ISD::INTRINSIC_WO_CHAIN: {
9536     unsigned IntNo =
9537         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9538     switch (IntNo) {
9539     default:
9540       // We can't do anything for most intrinsics.
9541       break;
9542     case Intrinsic::riscv_vsetvli:
9543     case Intrinsic::riscv_vsetvlimax:
9544     case Intrinsic::riscv_vsetvli_opt:
9545     case Intrinsic::riscv_vsetvlimax_opt:
9546       // Assume that VL output is positive and would fit in an int32_t.
9547       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9548       if (BitWidth >= 32)
9549         Known.Zero.setBitsFrom(31);
9550       break;
9551     }
9552     break;
9553   }
9554   }
9555 }
9556 
9557 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9558     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9559     unsigned Depth) const {
9560   switch (Op.getOpcode()) {
9561   default:
9562     break;
9563   case RISCVISD::SELECT_CC: {
9564     unsigned Tmp =
9565         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9566     if (Tmp == 1) return 1;  // Early out.
9567     unsigned Tmp2 =
9568         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9569     return std::min(Tmp, Tmp2);
9570   }
9571   case RISCVISD::SLLW:
9572   case RISCVISD::SRAW:
9573   case RISCVISD::SRLW:
9574   case RISCVISD::DIVW:
9575   case RISCVISD::DIVUW:
9576   case RISCVISD::REMUW:
9577   case RISCVISD::ROLW:
9578   case RISCVISD::RORW:
9579   case RISCVISD::GREVW:
9580   case RISCVISD::GORCW:
9581   case RISCVISD::FSLW:
9582   case RISCVISD::FSRW:
9583   case RISCVISD::SHFLW:
9584   case RISCVISD::UNSHFLW:
9585   case RISCVISD::BCOMPRESSW:
9586   case RISCVISD::BDECOMPRESSW:
9587   case RISCVISD::BFPW:
9588   case RISCVISD::FCVT_W_RV64:
9589   case RISCVISD::FCVT_WU_RV64:
9590   case RISCVISD::STRICT_FCVT_W_RV64:
9591   case RISCVISD::STRICT_FCVT_WU_RV64:
9592     // TODO: As the result is sign-extended, this is conservatively correct. A
9593     // more precise answer could be calculated for SRAW depending on known
9594     // bits in the shift amount.
9595     return 33;
9596   case RISCVISD::SHFL:
9597   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
    // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
    // will stay within the upper 32 bits. If there were more than 32 sign
    // bits before, there will be at least 33 sign bits after.
9602     if (Op.getValueType() == MVT::i64 &&
9603         isa<ConstantSDNode>(Op.getOperand(1)) &&
9604         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9605       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9606       if (Tmp > 32)
9607         return 33;
9608     }
9609     break;
9610   }
9611   case RISCVISD::VMV_X_S: {
9612     // The number of sign bits of the scalar result is computed by obtaining the
9613     // element type of the input vector operand, subtracting its width from the
9614     // XLEN, and then adding one (sign bit within the element type). If the
9615     // element type is wider than XLen, the least-significant XLEN bits are
9616     // taken.
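    // For example (illustrative), extracting an i8 element on RV64 yields
    // 64 - 8 + 1 = 57 known sign bits.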
9617     unsigned XLen = Subtarget.getXLen();
9618     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9619     if (EltBits <= XLen)
9620       return XLen - EltBits + 1;
9621     break;
9622   }
9623   }
9624 
9625   return 1;
9626 }
9627 
9628 const Constant *
9629 RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
9630   assert(Ld && "Unexpected null LoadSDNode");
9631   if (!ISD::isNormalLoad(Ld))
9632     return nullptr;
9633 
9634   SDValue Ptr = Ld->getBasePtr();
9635 
9636   // Only constant pools with no offset are supported.
9637   auto GetSupportedConstantPool = [](SDValue Ptr) -> ConstantPoolSDNode * {
9638     auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
9639     if (!CNode || CNode->isMachineConstantPoolEntry() ||
9640         CNode->getOffset() != 0)
9641       return nullptr;
9642 
9643     return CNode;
9644   };
9645 
9646   // Simple case, LLA.
9647   if (Ptr.getOpcode() == RISCVISD::LLA) {
9648     auto *CNode = GetSupportedConstantPool(Ptr);
9649     if (!CNode || CNode->getTargetFlags() != 0)
9650       return nullptr;
9651 
9652     return CNode->getConstVal();
9653   }
9654 
9655   // Look for a HI and ADD_LO pair.
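  // This matches the (lui %hi(sym)) / (addi %lo(sym)) sequence emitted for
  // the medlow code model.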
9656   if (Ptr.getOpcode() != RISCVISD::ADD_LO ||
9657       Ptr.getOperand(0).getOpcode() != RISCVISD::HI)
9658     return nullptr;
9659 
9660   auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
9661   auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));
9662 
9663   if (!CNodeLo || CNodeLo->getTargetFlags() != RISCVII::MO_LO ||
9664       !CNodeHi || CNodeHi->getTargetFlags() != RISCVII::MO_HI)
9665     return nullptr;
9666 
9667   if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
9668     return nullptr;
9669 
9670   return CNodeLo->getConstVal();
9671 }
9672 
9673 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9674                                                   MachineBasicBlock *BB) {
9675   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9676 
9677   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9678   // Should the count have wrapped while it was being read, we need to try
9679   // again.
9680   // ...
9681   // read:
9682   // rdcycleh x3 # load high word of cycle
9683   // rdcycle  x2 # load low word of cycle
9684   // rdcycleh x4 # load high word of cycle
9685   // bne x3, x4, read # check if high word reads match, otherwise try again
9686   // ...
9687 
9688   MachineFunction &MF = *BB->getParent();
9689   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9690   MachineFunction::iterator It = ++BB->getIterator();
9691 
9692   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9693   MF.insert(It, LoopMBB);
9694 
9695   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9696   MF.insert(It, DoneMBB);
9697 
9698   // Transfer the remainder of BB and its successor edges to DoneMBB.
9699   DoneMBB->splice(DoneMBB->begin(), BB,
9700                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9701   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9702 
9703   BB->addSuccessor(LoopMBB);
9704 
9705   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9706   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9707   Register LoReg = MI.getOperand(0).getReg();
9708   Register HiReg = MI.getOperand(1).getReg();
9709   DebugLoc DL = MI.getDebugLoc();
9710 
9711   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9712   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9713       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9714       .addReg(RISCV::X0);
9715   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9716       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9717       .addReg(RISCV::X0);
9718   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9719       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9720       .addReg(RISCV::X0);
9721 
9722   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9723       .addReg(HiReg)
9724       .addReg(ReadAgainReg)
9725       .addMBB(LoopMBB);
9726 
9727   LoopMBB->addSuccessor(LoopMBB);
9728   LoopMBB->addSuccessor(DoneMBB);
9729 
9730   MI.eraseFromParent();
9731 
9732   return DoneMBB;
9733 }
9734 
9735 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9736                                              MachineBasicBlock *BB) {
9737   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9738 
9739   MachineFunction &MF = *BB->getParent();
9740   DebugLoc DL = MI.getDebugLoc();
9741   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9742   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9743   Register LoReg = MI.getOperand(0).getReg();
9744   Register HiReg = MI.getOperand(1).getReg();
9745   Register SrcReg = MI.getOperand(2).getReg();
9746   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9747   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9748 
9749   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9750                           RI);
9751   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9752   MachineMemOperand *MMOLo =
9753       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9754   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9755       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9756   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9757       .addFrameIndex(FI)
9758       .addImm(0)
9759       .addMemOperand(MMOLo);
9760   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9761       .addFrameIndex(FI)
9762       .addImm(4)
9763       .addMemOperand(MMOHi);
9764   MI.eraseFromParent(); // The pseudo instruction is gone now.
9765   return BB;
9766 }
9767 
9768 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9769                                                  MachineBasicBlock *BB) {
9770   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9771          "Unexpected instruction");
9772 
9773   MachineFunction &MF = *BB->getParent();
9774   DebugLoc DL = MI.getDebugLoc();
9775   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9776   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9777   Register DstReg = MI.getOperand(0).getReg();
9778   Register LoReg = MI.getOperand(1).getReg();
9779   Register HiReg = MI.getOperand(2).getReg();
9780   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9781   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9782 
9783   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9784   MachineMemOperand *MMOLo =
9785       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9786   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9787       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9788   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9789       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9790       .addFrameIndex(FI)
9791       .addImm(0)
9792       .addMemOperand(MMOLo);
9793   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9794       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9795       .addFrameIndex(FI)
9796       .addImm(4)
9797       .addMemOperand(MMOHi);
9798   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9799   MI.eraseFromParent(); // The pseudo instruction is gone now.
9800   return BB;
9801 }
9802 
9803 static bool isSelectPseudo(MachineInstr &MI) {
9804   switch (MI.getOpcode()) {
9805   default:
9806     return false;
9807   case RISCV::Select_GPR_Using_CC_GPR:
9808   case RISCV::Select_FPR16_Using_CC_GPR:
9809   case RISCV::Select_FPR32_Using_CC_GPR:
9810   case RISCV::Select_FPR64_Using_CC_GPR:
9811     return true;
9812   }
9813 }
9814 
9815 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9816                                         unsigned RelOpcode, unsigned EqOpcode,
9817                                         const RISCVSubtarget &Subtarget) {
9818   DebugLoc DL = MI.getDebugLoc();
9819   Register DstReg = MI.getOperand(0).getReg();
9820   Register Src1Reg = MI.getOperand(1).getReg();
9821   Register Src2Reg = MI.getOperand(2).getReg();
9822   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9823   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9824   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9825 
9826   // Save the current FFLAGS.
9827   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9828 
9829   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9830                  .addReg(Src1Reg)
9831                  .addReg(Src2Reg);
9832   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9833     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9834 
9835   // Restore the FFLAGS.
9836   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9837       .addReg(SavedFFlags, RegState::Kill);
9838 
  // Issue a dummy FEQ opcode to raise an exception for signaling NaNs.
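  // (FEQ is itself a quiet comparison, but it still raises the invalid
  // exception flag for signaling NaN operands, which is the behaviour the
  // quiet FLT/FLE pseudos must preserve.)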
9840   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9841                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9842                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9843   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9844     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9845 
9846   // Erase the pseudoinstruction.
9847   MI.eraseFromParent();
9848   return BB;
9849 }
9850 
9851 static MachineBasicBlock *
9852 EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second,
9853                           MachineBasicBlock *ThisMBB,
9854                           const RISCVSubtarget &Subtarget) {
  // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
  // Without this optimization, the custom inserter would have generated:
9857   //
9858   //   A
9859   //   | \
9860   //   |  B
9861   //   | /
9862   //   C
9863   //   | \
9864   //   |  D
9865   //   | /
9866   //   E
9867   //
9868   // A: X = ...; Y = ...
9869   // B: empty
9870   // C: Z = PHI [X, A], [Y, B]
9871   // D: empty
9872   // E: PHI [X, C], [Z, D]
9873   //
9874   // If we lower both Select_FPRX_ in a single step, we can instead generate:
9875   //
9876   //   A
9877   //   | \
9878   //   |  C
9879   //   | /|
9880   //   |/ |
9881   //   |  |
9882   //   |  D
9883   //   | /
9884   //   E
9885   //
9886   // A: X = ...; Y = ...
9887   // D: empty
9888   // E: PHI [X, A], [X, C], [Y, D]
9889 
9890   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9891   const DebugLoc &DL = First.getDebugLoc();
9892   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
9893   MachineFunction *F = ThisMBB->getParent();
9894   MachineBasicBlock *FirstMBB = F->CreateMachineBasicBlock(LLVM_BB);
9895   MachineBasicBlock *SecondMBB = F->CreateMachineBasicBlock(LLVM_BB);
9896   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
9897   MachineFunction::iterator It = ++ThisMBB->getIterator();
9898   F->insert(It, FirstMBB);
9899   F->insert(It, SecondMBB);
9900   F->insert(It, SinkMBB);
9901 
9902   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
9903   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
9904                   std::next(MachineBasicBlock::iterator(First)),
9905                   ThisMBB->end());
9906   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
9907 
9908   // Fallthrough block for ThisMBB.
9909   ThisMBB->addSuccessor(FirstMBB);
9910   // Fallthrough block for FirstMBB.
9911   FirstMBB->addSuccessor(SecondMBB);
9912   ThisMBB->addSuccessor(SinkMBB);
9913   FirstMBB->addSuccessor(SinkMBB);
9914   // This is fallthrough.
9915   SecondMBB->addSuccessor(SinkMBB);
9916 
9917   auto FirstCC = static_cast<RISCVCC::CondCode>(First.getOperand(3).getImm());
9918   Register FLHS = First.getOperand(1).getReg();
9919   Register FRHS = First.getOperand(2).getReg();
9920   // Insert appropriate branch.
9921   BuildMI(FirstMBB, DL, TII.getBrCond(FirstCC))
9922       .addReg(FLHS)
9923       .addReg(FRHS)
9924       .addMBB(SinkMBB);
9925 
9926   Register SLHS = Second.getOperand(1).getReg();
9927   Register SRHS = Second.getOperand(2).getReg();
9928   Register Op1Reg4 = First.getOperand(4).getReg();
9929   Register Op1Reg5 = First.getOperand(5).getReg();
9930 
9931   auto SecondCC = static_cast<RISCVCC::CondCode>(Second.getOperand(3).getImm());
9932   // Insert appropriate branch.
9933   BuildMI(ThisMBB, DL, TII.getBrCond(SecondCC))
9934       .addReg(SLHS)
9935       .addReg(SRHS)
9936       .addMBB(SinkMBB);
9937 
9938   Register DestReg = Second.getOperand(0).getReg();
9939   Register Op2Reg4 = Second.getOperand(4).getReg();
9940   BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(RISCV::PHI), DestReg)
9941       .addReg(Op2Reg4)
9942       .addMBB(ThisMBB)
9943       .addReg(Op1Reg4)
9944       .addMBB(FirstMBB)
9945       .addReg(Op1Reg5)
9946       .addMBB(SecondMBB);
9947 
9948   // Now remove the Select_FPRX_s.
9949   First.eraseFromParent();
9950   Second.eraseFromParent();
9951   return SinkMBB;
9952 }
9953 
9954 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9955                                            MachineBasicBlock *BB,
9956                                            const RISCVSubtarget &Subtarget) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the registers to compare, the true/false values to select between,
  // and the condcode to use to select the appropriate branch.
9961   //
9962   // We produce the following control flow:
9963   //     HeadMBB
9964   //     |  \
9965   //     |  IfFalseMBB
9966   //     | /
9967   //    TailMBB
9968   //
9969   // When we find a sequence of selects we attempt to optimize their emission
9970   // by sharing the control flow. Currently we only handle cases where we have
9971   // multiple selects with the exact same condition (same LHS, RHS and CC).
9972   // The selects may be interleaved with other instructions if the other
9973   // instructions meet some requirements we deem safe:
9974   // - They are debug instructions. Otherwise,
9975   // - They do not have side-effects, do not access memory and their inputs do
9976   //   not depend on the results of the select pseudo-instructions.
9977   // The TrueV/FalseV operands of the selects cannot depend on the result of
9978   // previous selects in the sequence.
9979   // These conditions could be further relaxed. See the X86 target for a
9980   // related approach and more information.
9981   //
9982   // Select_FPRX_ (rs1, rs2, imm, rs4, (Select_FPRX_ rs1, rs2, imm, rs4, rs5))
9983   // is checked here and handled by a separate function -
9984   // EmitLoweredCascadedSelect.
9985   Register LHS = MI.getOperand(1).getReg();
9986   Register RHS = MI.getOperand(2).getReg();
9987   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9988 
9989   SmallVector<MachineInstr *, 4> SelectDebugValues;
9990   SmallSet<Register, 4> SelectDests;
9991   SelectDests.insert(MI.getOperand(0).getReg());
9992 
9993   MachineInstr *LastSelectPseudo = &MI;
9994   auto Next = next_nodbg(MI.getIterator(), BB->instr_end());
9995   if (MI.getOpcode() != RISCV::Select_GPR_Using_CC_GPR && Next != BB->end() &&
9996       Next->getOpcode() == MI.getOpcode() &&
9997       Next->getOperand(5).getReg() == MI.getOperand(0).getReg() &&
9998       Next->getOperand(5).isKill()) {
9999     return EmitLoweredCascadedSelect(MI, *Next, BB, Subtarget);
10000   }
10001 
10002   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
10003        SequenceMBBI != E; ++SequenceMBBI) {
10004     if (SequenceMBBI->isDebugInstr())
10005       continue;
10006     if (isSelectPseudo(*SequenceMBBI)) {
10007       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
10008           SequenceMBBI->getOperand(2).getReg() != RHS ||
10009           SequenceMBBI->getOperand(3).getImm() != CC ||
10010           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
10011           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
10012         break;
10013       LastSelectPseudo = &*SequenceMBBI;
10014       SequenceMBBI->collectDebugValues(SelectDebugValues);
10015       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
10016     } else {
10017       if (SequenceMBBI->hasUnmodeledSideEffects() ||
10018           SequenceMBBI->mayLoadOrStore())
10019         break;
10020       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
10021             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
10022           }))
10023         break;
10024     }
10025   }
10026 
10027   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
10028   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10029   DebugLoc DL = MI.getDebugLoc();
10030   MachineFunction::iterator I = ++BB->getIterator();
10031 
10032   MachineBasicBlock *HeadMBB = BB;
10033   MachineFunction *F = BB->getParent();
10034   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
10035   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
10036 
10037   F->insert(I, IfFalseMBB);
10038   F->insert(I, TailMBB);
10039 
10040   // Transfer debug instructions associated with the selects to TailMBB.
10041   for (MachineInstr *DebugInstr : SelectDebugValues) {
10042     TailMBB->push_back(DebugInstr->removeFromParent());
10043   }
10044 
10045   // Move all instructions after the sequence to TailMBB.
10046   TailMBB->splice(TailMBB->end(), HeadMBB,
10047                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
10048   // Update machine-CFG edges by transferring all successors of the current
10049   // block to the new block which will contain the Phi nodes for the selects.
10050   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
10051   // Set the successors for HeadMBB.
10052   HeadMBB->addSuccessor(IfFalseMBB);
10053   HeadMBB->addSuccessor(TailMBB);
10054 
10055   // Insert appropriate branch.
10056   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
10057     .addReg(LHS)
10058     .addReg(RHS)
10059     .addMBB(TailMBB);
10060 
10061   // IfFalseMBB just falls through to TailMBB.
10062   IfFalseMBB->addSuccessor(TailMBB);
10063 
10064   // Create PHIs for all of the select pseudo-instructions.
10065   auto SelectMBBI = MI.getIterator();
10066   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
10067   auto InsertionPoint = TailMBB->begin();
10068   while (SelectMBBI != SelectEnd) {
10069     auto Next = std::next(SelectMBBI);
10070     if (isSelectPseudo(*SelectMBBI)) {
10071       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
10072       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
10073               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
10074           .addReg(SelectMBBI->getOperand(4).getReg())
10075           .addMBB(HeadMBB)
10076           .addReg(SelectMBBI->getOperand(5).getReg())
10077           .addMBB(IfFalseMBB);
10078       SelectMBBI->eraseFromParent();
10079     }
10080     SelectMBBI = Next;
10081   }
10082 
10083   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
10084   return TailMBB;
10085 }
10086 
10087 MachineBasicBlock *
10088 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10089                                                  MachineBasicBlock *BB) const {
10090   switch (MI.getOpcode()) {
10091   default:
10092     llvm_unreachable("Unexpected instr type to insert");
10093   case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
10096     return emitReadCycleWidePseudo(MI, BB);
10097   case RISCV::Select_GPR_Using_CC_GPR:
10098   case RISCV::Select_FPR16_Using_CC_GPR:
10099   case RISCV::Select_FPR32_Using_CC_GPR:
10100   case RISCV::Select_FPR64_Using_CC_GPR:
10101     return emitSelectPseudo(MI, BB, Subtarget);
10102   case RISCV::BuildPairF64Pseudo:
10103     return emitBuildPairF64Pseudo(MI, BB);
10104   case RISCV::SplitF64Pseudo:
10105     return emitSplitF64Pseudo(MI, BB);
10106   case RISCV::PseudoQuietFLE_H:
10107     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
10108   case RISCV::PseudoQuietFLT_H:
10109     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
10110   case RISCV::PseudoQuietFLE_S:
10111     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
10112   case RISCV::PseudoQuietFLT_S:
10113     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
10114   case RISCV::PseudoQuietFLE_D:
10115     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
10116   case RISCV::PseudoQuietFLT_D:
10117     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
10118   }
10119 }
10120 
10121 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
10122                                                         SDNode *Node) const {
10123   // Add FRM dependency to any instructions with dynamic rounding mode.
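  // For example (illustrative), an FADD_D whose frm operand is DYN gains an
  // implicit use of FRM, making its dependency on the rounding-mode CSR
  // visible to later passes.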
10124   unsigned Opc = MI.getOpcode();
10125   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
10126   if (Idx < 0)
10127     return;
10128   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
10129     return;
10130   // If the instruction already reads FRM, don't add another read.
10131   if (MI.readsRegister(RISCV::FRM))
10132     return;
10133   MI.addOperand(
10134       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
10135 }
10136 
10137 // Calling Convention Implementation.
10138 // The expectations for frontend ABI lowering vary from target to target.
10139 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
10140 // details, but this is a longer term goal. For now, we simply try to keep the
10141 // role of the frontend as simple and well-defined as possible. The rules can
10142 // be summarised as:
10143 // * Never split up large scalar arguments. We handle them here.
10144 // * If a hardfloat calling convention is being used, and the struct may be
10145 // passed in a pair of registers (fp+fp, int+fp), and both registers are
10146 // available, then pass as two separate arguments. If either the GPRs or FPRs
10147 // are exhausted, then pass according to the rule below.
10148 // * If a struct could never be passed in registers or directly in a stack
10149 // slot (as it is larger than 2*XLEN and the floating point rules don't
10150 // apply), then pass it using a pointer with the byval attribute.
10151 // * If a struct is less than 2*XLEN, then coerce to either a two-element
10152 // word-sized array or a 2*XLEN scalar (depending on alignment).
10153 // * The frontend can determine whether a struct is returned by reference or
10154 // not based on its size and fields. If it will be returned by reference, the
10155 // frontend must modify the prototype so a pointer with the sret annotation is
10156 // passed as the first argument. This is not necessary for large scalar
10157 // returns.
10158 // * Struct return values and varargs should be coerced to structs containing
10159 // register-size fields in the same situations they would be for fixed
10160 // arguments.
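// For example (illustrative), under the hard-float 'd' ABI a
// struct { double d; int32_t i; } can be passed as a separate FPR64 and GPR
// argument while both register classes still have free argument registers.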
10161 
10162 static const MCPhysReg ArgGPRs[] = {
10163   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
10164   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
10165 };
10166 static const MCPhysReg ArgFPR16s[] = {
10167   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
10168   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
10169 };
10170 static const MCPhysReg ArgFPR32s[] = {
10171   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
10172   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
10173 };
10174 static const MCPhysReg ArgFPR64s[] = {
10175   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
10176   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
10177 };
10178 // This is an interim calling convention and it may be changed in the future.
10179 static const MCPhysReg ArgVRs[] = {
10180     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
10181     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
10182     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
10183 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
10184                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
10185                                      RISCV::V20M2, RISCV::V22M2};
10186 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
10187                                      RISCV::V20M4};
10188 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
10189 
10190 // Pass a 2*XLEN argument that has been split into two XLEN values through
10191 // registers or the stack as necessary.
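      // For example (illustrative only), an i64 argument on RV32 that was
      // legalised into two i32 halves may end up in a pair of GPRs, in one GPR
      // plus a stack slot, or entirely on the stack, depending on how many
      // argument GPRs remain.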
10192 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
10193                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
10194                                 MVT ValVT2, MVT LocVT2,
10195                                 ISD::ArgFlagsTy ArgFlags2) {
10196   unsigned XLenInBytes = XLen / 8;
10197   if (Register Reg = State.AllocateReg(ArgGPRs)) {
10198     // At least one half can be passed via register.
10199     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
10200                                      VA1.getLocVT(), CCValAssign::Full));
10201   } else {
10202     // Both halves must be passed on the stack, with proper alignment.
10203     Align StackAlign =
10204         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
10205     State.addLoc(
10206         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
10207                             State.AllocateStack(XLenInBytes, StackAlign),
10208                             VA1.getLocVT(), CCValAssign::Full));
10209     State.addLoc(CCValAssign::getMem(
10210         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
10211         LocVT2, CCValAssign::Full));
10212     return false;
10213   }
10214 
10215   if (Register Reg = State.AllocateReg(ArgGPRs)) {
10216     // The second half can also be passed via register.
10217     State.addLoc(
10218         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
10219   } else {
10220     // The second half is passed via the stack, without additional alignment.
10221     State.addLoc(CCValAssign::getMem(
10222         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
10223         LocVT2, CCValAssign::Full));
10224   }
10225 
10226   return false;
10227 }
10228 
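      // Allocate a vector argument register: V0 for the first mask argument,
      // otherwise a register (or register group) of the LMUL-appropriate class
      // drawn from the v8-v23 range. Returns 0 (no register) on failure.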
10229 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
10230                                Optional<unsigned> FirstMaskArgument,
10231                                CCState &State, const RISCVTargetLowering &TLI) {
10232   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
10233   if (RC == &RISCV::VRRegClass) {
10234     // Assign the first mask argument to V0.
10235     // This is an interim calling convention and it may be changed in the
10236     // future.
10237     if (FirstMaskArgument && ValNo == *FirstMaskArgument)
10238       return State.AllocateReg(RISCV::V0);
10239     return State.AllocateReg(ArgVRs);
10240   }
10241   if (RC == &RISCV::VRM2RegClass)
10242     return State.AllocateReg(ArgVRM2s);
10243   if (RC == &RISCV::VRM4RegClass)
10244     return State.AllocateReg(ArgVRM4s);
10245   if (RC == &RISCV::VRM8RegClass)
10246     return State.AllocateReg(ArgVRM8s);
10247   llvm_unreachable("Unhandled register class for ValueType");
10248 }
10249 
10250 // Implements the RISC-V calling convention. Returns true upon failure.
10251 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
10252                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
10253                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
10254                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
10255                      Optional<unsigned> FirstMaskArgument) {
10256   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
10257   assert(XLen == 32 || XLen == 64);
10258   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
10259 
10260   // Any return value split into more than two values can't be returned
10261   // directly. Vectors are returned via the available vector registers.
10262   if (!LocVT.isVector() && IsRet && ValNo > 1)
10263     return true;
10264 
10265   // UseGPRForF16_F32 is true when targeting a soft-float ABI, when passing a
10266   // variadic argument, or when no F16/F32 argument registers are available.
10267   bool UseGPRForF16_F32 = true;
10268   // UseGPRForF64 is true when targeting a soft-float or FLEN=32 ABI, when
10269   // passing a variadic argument, or when no F64 argument registers are available.
10270   bool UseGPRForF64 = true;
10271 
10272   switch (ABI) {
10273   default:
10274     llvm_unreachable("Unexpected ABI");
10275   case RISCVABI::ABI_ILP32:
10276   case RISCVABI::ABI_LP64:
10277     break;
10278   case RISCVABI::ABI_ILP32F:
10279   case RISCVABI::ABI_LP64F:
10280     UseGPRForF16_F32 = !IsFixed;
10281     break;
10282   case RISCVABI::ABI_ILP32D:
10283   case RISCVABI::ABI_LP64D:
10284     UseGPRForF16_F32 = !IsFixed;
10285     UseGPRForF64 = !IsFixed;
10286     break;
10287   }
10288 
10289   // FPR16, FPR32, and FPR64 alias each other.
10290   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
10291     UseGPRForF16_F32 = true;
10292     UseGPRForF64 = true;
10293   }
10294 
10295   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
10296   // similar local variables rather than directly checking against the target
10297   // ABI.
10298 
10299   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
10300     LocVT = XLenVT;
10301     LocInfo = CCValAssign::BCvt;
10302   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
10303     LocVT = MVT::i64;
10304     LocInfo = CCValAssign::BCvt;
10305   }
10306 
10307   // If this is a variadic argument, the RISC-V calling convention requires
10308   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
10309   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
10310   // be used regardless of whether the original argument was split during
10311   // legalisation or not. The argument will not be passed by registers if the
10312   // original type is larger than 2*XLEN, so the register alignment rule does
10313   // not apply.
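        // For instance (illustrative), a variadic double on RV32 whose next
        // free GPR would be a1 is instead assigned the aligned pair a2+a3,
        // leaving a1 unused.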
10314   unsigned TwoXLenInBytes = (2 * XLen) / 8;
10315   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
10316       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
10317     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
10318     // Skip 'odd' register if necessary.
10319     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
10320       State.AllocateReg(ArgGPRs);
10321   }
10322 
10323   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
10324   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
10325       State.getPendingArgFlags();
10326 
10327   assert(PendingLocs.size() == PendingArgFlags.size() &&
10328          "PendingLocs and PendingArgFlags out of sync");
10329 
10330   // Handle passing f64 on RV32D with a soft float ABI or when floating point
10331   // registers are exhausted.
10332   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
10333     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
10334            "Can't lower f64 if it is split");
10335     // Depending on available argument GPRs, f64 may be passed in a pair of
10336     // GPRs, split between a GPR and the stack, or passed completely on the
10337     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
10338     // cases.
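          // E.g. (illustrative): with only a7 free, the low half goes in a7
          // and the high half in a 4-byte stack slot; with no GPRs free, the
          // whole f64 takes an 8-byte, 8-aligned stack slot.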
10339     Register Reg = State.AllocateReg(ArgGPRs);
10340     LocVT = MVT::i32;
10341     if (!Reg) {
10342       unsigned StackOffset = State.AllocateStack(8, Align(8));
10343       State.addLoc(
10344           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10345       return false;
10346     }
10347     if (!State.AllocateReg(ArgGPRs))
10348       State.AllocateStack(4, Align(4));
10349     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10350     return false;
10351   }
10352 
10353   // Fixed-length vectors are located in the corresponding scalable-vector
10354   // container types.
10355   if (ValVT.isFixedLengthVector())
10356     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10357 
10358   // Split arguments might be passed indirectly, so keep track of the pending
10359   // values. Split vectors are passed via a mix of registers and indirectly, so
10360   // treat them as we would any other argument.
10361   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
10362     LocVT = XLenVT;
10363     LocInfo = CCValAssign::Indirect;
10364     PendingLocs.push_back(
10365         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
10366     PendingArgFlags.push_back(ArgFlags);
10367     if (!ArgFlags.isSplitEnd()) {
10368       return false;
10369     }
10370   }
10371 
10372   // If the split argument only had two elements, it should be passed directly
10373   // in registers or on the stack.
10374   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
10375       PendingLocs.size() <= 2) {
10376     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
10377     // Apply the normal calling convention rules to the first half of the
10378     // split argument.
10379     CCValAssign VA = PendingLocs[0];
10380     ISD::ArgFlagsTy AF = PendingArgFlags[0];
10381     PendingLocs.clear();
10382     PendingArgFlags.clear();
10383     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
10384                                ArgFlags);
10385   }
10386 
10387   // Allocate to a register if possible, or else a stack slot.
10388   Register Reg;
10389   unsigned StoreSizeBytes = XLen / 8;
10390   Align StackAlign = Align(XLen / 8);
10391 
10392   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
10393     Reg = State.AllocateReg(ArgFPR16s);
10394   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
10395     Reg = State.AllocateReg(ArgFPR32s);
10396   else if (ValVT == MVT::f64 && !UseGPRForF64)
10397     Reg = State.AllocateReg(ArgFPR64s);
10398   else if (ValVT.isVector()) {
10399     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
10400     if (!Reg) {
10401       // For return values, the vector must be passed fully via registers or
10402       // via the stack.
10403       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
10404       // but we're using all of them.
10405       if (IsRet)
10406         return true;
10407       // Try using a GPR to pass the address.
10408       if ((Reg = State.AllocateReg(ArgGPRs))) {
10409         LocVT = XLenVT;
10410         LocInfo = CCValAssign::Indirect;
10411       } else if (ValVT.isScalableVector()) {
10412         LocVT = XLenVT;
10413         LocInfo = CCValAssign::Indirect;
10414       } else {
10415         // Pass fixed-length vectors on the stack.
10416         LocVT = ValVT;
10417         StoreSizeBytes = ValVT.getStoreSize();
10418         // Align vectors to their element size in bytes; vXi1 vectors round
10419         // the sub-byte element size up to an alignment of one.
10420         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10421       }
10422     }
10423   } else {
10424     Reg = State.AllocateReg(ArgGPRs);
10425   }
10426 
10427   unsigned StackOffset =
10428       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
10429 
10430   // If we reach this point and PendingLocs is non-empty, we must be at the
10431   // end of a split argument that must be passed indirectly.
10432   if (!PendingLocs.empty()) {
10433     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
10434     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
10435 
10436     for (auto &It : PendingLocs) {
10437       if (Reg)
10438         It.convertToReg(Reg);
10439       else
10440         It.convertToMem(StackOffset);
10441       State.addLoc(It);
10442     }
10443     PendingLocs.clear();
10444     PendingArgFlags.clear();
10445     return false;
10446   }
10447 
10448   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
10449           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
10450          "Expected an XLenVT or vector types at this stage");
10451 
10452   if (Reg) {
10453     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10454     return false;
10455   }
10456 
10457   // When a floating-point value is passed on the stack, no bit-conversion is
10458   // needed.
10459   if (ValVT.isFloatingPoint()) {
10460     LocVT = ValVT;
10461     LocInfo = CCValAssign::Full;
10462   }
10463   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10464   return false;
10465 }
10466 
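      // Return the index of the first vector mask argument (an argument whose
      // element type is i1), if any; the calling convention pre-assigns it to
      // V0.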
10467 template <typename ArgTy>
10468 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
10469   for (const auto &ArgIdx : enumerate(Args)) {
10470     MVT ArgVT = ArgIdx.value().VT;
10471     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
10472       return ArgIdx.index();
10473   }
10474   return None;
10475 }
10476 
10477 void RISCVTargetLowering::analyzeInputArgs(
10478     MachineFunction &MF, CCState &CCInfo,
10479     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
10480     RISCVCCAssignFn Fn) const {
10481   unsigned NumArgs = Ins.size();
10482   FunctionType *FType = MF.getFunction().getFunctionType();
10483 
10484   Optional<unsigned> FirstMaskArgument;
10485   if (Subtarget.hasVInstructions())
10486     FirstMaskArgument = preAssignMask(Ins);
10487 
10488   for (unsigned i = 0; i != NumArgs; ++i) {
10489     MVT ArgVT = Ins[i].VT;
10490     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10491 
10492     Type *ArgTy = nullptr;
10493     if (IsRet)
10494       ArgTy = FType->getReturnType();
10495     else if (Ins[i].isOrigArg())
10496       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10497 
10498     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10499     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10500            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10501            FirstMaskArgument)) {
10502       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10503                         << EVT(ArgVT).getEVTString() << '\n');
10504       llvm_unreachable(nullptr);
10505     }
10506   }
10507 }
10508 
10509 void RISCVTargetLowering::analyzeOutputArgs(
10510     MachineFunction &MF, CCState &CCInfo,
10511     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10512     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10513   unsigned NumArgs = Outs.size();
10514 
10515   Optional<unsigned> FirstMaskArgument;
10516   if (Subtarget.hasVInstructions())
10517     FirstMaskArgument = preAssignMask(Outs);
10518 
10519   for (unsigned i = 0; i != NumArgs; i++) {
10520     MVT ArgVT = Outs[i].VT;
10521     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10522     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10523 
10524     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10525     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10526            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10527            FirstMaskArgument)) {
10528       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10529                         << EVT(ArgVT).getEVTString() << "\n");
10530       llvm_unreachable(nullptr);
10531     }
10532   }
10533 }
10534 
10535 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
10536 // values.
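      // For example, an f32 that was passed bit-converted in a GPR
      // (CCValAssign::BCvt) is converted back to an f32 value here:
      // FMV_W_X_RV64 on RV64, a plain bitcast on RV32.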
10537 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10538                                    const CCValAssign &VA, const SDLoc &DL,
10539                                    const RISCVSubtarget &Subtarget) {
10540   switch (VA.getLocInfo()) {
10541   default:
10542     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10543   case CCValAssign::Full:
10544     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10545       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10546     break;
10547   case CCValAssign::BCvt:
10548     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10549       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10550     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10551       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10552     else
10553       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10554     break;
10555   }
10556   return Val;
10557 }
10558 
10559 // The caller is responsible for loading the full value if the argument is
10560 // passed with CCValAssign::Indirect.
10561 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10562                                 const CCValAssign &VA, const SDLoc &DL,
10563                                 const RISCVTargetLowering &TLI) {
10564   MachineFunction &MF = DAG.getMachineFunction();
10565   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10566   EVT LocVT = VA.getLocVT();
10567   SDValue Val;
10568   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10569   Register VReg = RegInfo.createVirtualRegister(RC);
10570   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10571   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10572 
10573   if (VA.getLocInfo() == CCValAssign::Indirect)
10574     return Val;
10575 
10576   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10577 }
10578 
10579 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10580                                    const CCValAssign &VA, const SDLoc &DL,
10581                                    const RISCVSubtarget &Subtarget) {
10582   EVT LocVT = VA.getLocVT();
10583 
10584   switch (VA.getLocInfo()) {
10585   default:
10586     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10587   case CCValAssign::Full:
10588     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10589       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10590     break;
10591   case CCValAssign::BCvt:
10592     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10593       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10594     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10595       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10596     else
10597       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10598     break;
10599   }
10600   return Val;
10601 }
10602 
10603 // The caller is responsible for loading the full value if the argument is
10604 // passed with CCValAssign::Indirect.
10605 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10606                                 const CCValAssign &VA, const SDLoc &DL) {
10607   MachineFunction &MF = DAG.getMachineFunction();
10608   MachineFrameInfo &MFI = MF.getFrameInfo();
10609   EVT LocVT = VA.getLocVT();
10610   EVT ValVT = VA.getValVT();
10611   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10612   if (ValVT.isScalableVector()) {
10613     // When the value is a scalable vector, what was passed on the stack is a
10614     // pointer to the vector value, so the type to load is the pointer-sized
10615     // LocVT rather than the scalable vector type.
10616     ValVT = LocVT;
10617   }
10618   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10619                                  /*IsImmutable=*/true);
10620   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10621   SDValue Val;
10622 
10623   ISD::LoadExtType ExtType;
10624   switch (VA.getLocInfo()) {
10625   default:
10626     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10627   case CCValAssign::Full:
10628   case CCValAssign::Indirect:
10629   case CCValAssign::BCvt:
10630     ExtType = ISD::NON_EXTLOAD;
10631     break;
10632   }
10633   Val = DAG.getExtLoad(
10634       ExtType, DL, LocVT, Chain, FIN,
10635       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10636   return Val;
10637 }
10638 
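      // Unpack an f64 argument passed under the RV32 soft-float f64 rules:
      // from an 8-byte stack slot, from a pair of GPRs, or split between the
      // last argument GPR (a7/X17) and a 4-byte stack slot.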
10639 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10640                                        const CCValAssign &VA, const SDLoc &DL) {
10641   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10642          "Unexpected VA");
10643   MachineFunction &MF = DAG.getMachineFunction();
10644   MachineFrameInfo &MFI = MF.getFrameInfo();
10645   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10646 
10647   if (VA.isMemLoc()) {
10648     // f64 is passed on the stack.
10649     int FI =
10650         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10651     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10652     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10653                        MachinePointerInfo::getFixedStack(MF, FI));
10654   }
10655 
10656   assert(VA.isRegLoc() && "Expected register VA assignment");
10657 
10658   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10659   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10660   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10661   SDValue Hi;
10662   if (VA.getLocReg() == RISCV::X17) {
10663     // Second half of f64 is passed on the stack.
10664     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10665     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10666     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10667                      MachinePointerInfo::getFixedStack(MF, FI));
10668   } else {
10669     // Second half of f64 is passed in another GPR.
10670     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10671     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10672     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10673   }
10674   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10675 }
10676 
10677 // FastCC has less than a 1% performance improvement on some particular
10678 // benchmarks, but it may theoretically benefit other cases.
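      // Beyond a0-a7, this convention also draws on the caller-saved
      // temporaries t2-t6 (x7, x28-x31) for integer arguments, keeping t0/t1
      // (x5/x6) free for the save-restore libcall (see GPRList below).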
10679 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10680                             unsigned ValNo, MVT ValVT, MVT LocVT,
10681                             CCValAssign::LocInfo LocInfo,
10682                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10683                             bool IsFixed, bool IsRet, Type *OrigTy,
10684                             const RISCVTargetLowering &TLI,
10685                             Optional<unsigned> FirstMaskArgument) {
10686 
10687   // X5 and X6 might be used for save-restore libcall.
10688   static const MCPhysReg GPRList[] = {
10689       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10690       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10691       RISCV::X29, RISCV::X30, RISCV::X31};
10692 
10693   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10694     if (unsigned Reg = State.AllocateReg(GPRList)) {
10695       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10696       return false;
10697     }
10698   }
10699 
10700   if (LocVT == MVT::f16) {
10701     static const MCPhysReg FPR16List[] = {
10702         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10703         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10704         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10705         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10706     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10707       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10708       return false;
10709     }
10710   }
10711 
10712   if (LocVT == MVT::f32) {
10713     static const MCPhysReg FPR32List[] = {
10714         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10715         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10716         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10717         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10718     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10719       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10720       return false;
10721     }
10722   }
10723 
10724   if (LocVT == MVT::f64) {
10725     static const MCPhysReg FPR64List[] = {
10726         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10727         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10728         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10729         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10730     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10731       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10732       return false;
10733     }
10734   }
10735 
10736   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10737     unsigned Offset4 = State.AllocateStack(4, Align(4));
10738     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10739     return false;
10740   }
10741 
10742   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10743     unsigned Offset5 = State.AllocateStack(8, Align(8));
10744     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10745     return false;
10746   }
10747 
10748   if (LocVT.isVector()) {
10749     if (unsigned Reg =
10750             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10751       // Fixed-length vectors are located in the corresponding scalable-vector
10752       // container types.
10753       if (ValVT.isFixedLengthVector())
10754         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10755       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10756     } else {
10757       // Try to pass the address via a "fast" GPR.
10758       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10759         LocInfo = CCValAssign::Indirect;
10760         LocVT = TLI.getSubtarget().getXLenVT();
10761         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10762       } else if (ValVT.isFixedLengthVector()) {
10763         auto StackAlign =
10764             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10765         unsigned StackOffset =
10766             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10767         State.addLoc(
10768             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10769       } else {
10770         // Can't pass scalable vectors on the stack.
10771         return true;
10772       }
10773     }
10774 
10775     return false;
10776   }
10777 
10778   return true; // CC didn't match.
10779 }
10780 
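      // Implements the GHC calling convention, which maps the STG machine
      // registers onto fixed callee-saved RISC-V registers; nothing is ever
      // passed on the stack, so exhausting the register lists is a fatal
      // error.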
10781 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10782                          CCValAssign::LocInfo LocInfo,
10783                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10784 
10785   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10786     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10787     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10788     static const MCPhysReg GPRList[] = {
10789         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10790         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10791     if (unsigned Reg = State.AllocateReg(GPRList)) {
10792       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10793       return false;
10794     }
10795   }
10796 
10797   if (LocVT == MVT::f32) {
10798     // Pass in STG registers: F1, ..., F6
10799     //                        fs0 ... fs5
10800     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10801                                           RISCV::F18_F, RISCV::F19_F,
10802                                           RISCV::F20_F, RISCV::F21_F};
10803     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10804       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10805       return false;
10806     }
10807   }
10808 
10809   if (LocVT == MVT::f64) {
10810     // Pass in STG registers: D1, ..., D6
10811     //                        fs6 ... fs11
10812     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10813                                           RISCV::F24_D, RISCV::F25_D,
10814                                           RISCV::F26_D, RISCV::F27_D};
10815     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10816       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10817       return false;
10818     }
10819   }
10820 
10821   report_fatal_error("No registers left in GHC calling convention");
10822   return true;
10823 }
10824 
10825 // Transform physical registers into virtual registers.
10826 SDValue RISCVTargetLowering::LowerFormalArguments(
10827     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10828     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10829     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10830 
10831   MachineFunction &MF = DAG.getMachineFunction();
10832 
10833   switch (CallConv) {
10834   default:
10835     report_fatal_error("Unsupported calling convention");
10836   case CallingConv::C:
10837   case CallingConv::Fast:
10838     break;
10839   case CallingConv::GHC:
10840     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10841         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10842       report_fatal_error(
10843         "GHC calling convention requires the F and D instruction set extensions");
10844   }
10845 
10846   const Function &Func = MF.getFunction();
10847   if (Func.hasFnAttribute("interrupt")) {
10848     if (!Func.arg_empty())
10849       report_fatal_error(
10850         "Functions with the interrupt attribute cannot have arguments!");
10851 
10852     StringRef Kind =
10853       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10854 
10855     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10856       report_fatal_error(
10857         "Function interrupt attribute argument not supported!");
10858   }
10859 
10860   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10861   MVT XLenVT = Subtarget.getXLenVT();
10862   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10863   // Used with varargs to accumulate store chains.
10864   std::vector<SDValue> OutChains;
10865 
10866   // Assign locations to all of the incoming arguments.
10867   SmallVector<CCValAssign, 16> ArgLocs;
10868   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10869 
10870   if (CallConv == CallingConv::GHC)
10871     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10872   else
10873     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10874                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10875                                                    : CC_RISCV);
10876 
10877   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10878     CCValAssign &VA = ArgLocs[i];
10879     SDValue ArgValue;
10880     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10881     // case.
10882     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10883       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10884     else if (VA.isRegLoc())
10885       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10886     else
10887       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10888 
10889     if (VA.getLocInfo() == CCValAssign::Indirect) {
10890       // If the original argument was split and passed by reference (e.g. i128
10891       // on RV32), we need to load all parts of it here (using the same
10892       // address). Vectors may be partly split to registers and partly to the
10893       // stack, in which case the base address is partly offset and subsequent
10894       // stores are relative to that.
10895       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10896                                    MachinePointerInfo()));
10897       unsigned ArgIndex = Ins[i].OrigArgIndex;
10898       unsigned ArgPartOffset = Ins[i].PartOffset;
10899       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10900       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10901         CCValAssign &PartVA = ArgLocs[i + 1];
10902         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10903         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10904         if (PartVA.getValVT().isScalableVector())
10905           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10906         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10907         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10908                                      MachinePointerInfo()));
10909         ++i;
10910       }
10911       continue;
10912     }
10913     InVals.push_back(ArgValue);
10914   }
10915 
10916   if (IsVarArg) {
10917     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10918     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10919     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10920     MachineFrameInfo &MFI = MF.getFrameInfo();
10921     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10922     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10923 
10924     // Offset of the first variable argument from stack pointer, and size of
10925     // the vararg save area. For now, the varargs save area is either zero or
10926     // large enough to hold a0-a7.
10927     int VaArgOffset, VarArgsSaveSize;
10928 
10929     // If all registers are allocated, then all varargs must be passed on the
10930     // stack and we don't need to save any argregs.
10931     if (ArgRegs.size() == Idx) {
10932       VaArgOffset = CCInfo.getNextStackOffset();
10933       VarArgsSaveSize = 0;
10934     } else {
10935       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10936       VaArgOffset = -VarArgsSaveSize;
10937     }
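          // E.g. (illustrative): if a0-a3 hold fixed arguments on RV32, then
          // a4-a7 are spilled to a 16-byte save area at offsets -16..-4 from
          // the incoming stack pointer, and va_start points at the a4 slot.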
10938 
10939     // Record the frame index of the first variable argument,
10940     // which is needed when lowering VASTART.
10941     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10942     RVFI->setVarArgsFrameIndex(FI);
10943 
10944     // If saving an odd number of registers then create an extra stack slot to
10945     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10946     // offsets to even-numbered registers remain 2*XLEN-aligned.
10947     if (Idx % 2) {
10948       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10949       VarArgsSaveSize += XLenInBytes;
10950     }
10951 
10952     // Copy the integer registers that may have been used for passing varargs
10953     // to the vararg save area.
10954     for (unsigned I = Idx; I < ArgRegs.size();
10955          ++I, VaArgOffset += XLenInBytes) {
10956       const Register Reg = RegInfo.createVirtualRegister(RC);
10957       RegInfo.addLiveIn(ArgRegs[I], Reg);
10958       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10959       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10960       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10961       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10962                                    MachinePointerInfo::getFixedStack(MF, FI));
10963       cast<StoreSDNode>(Store.getNode())
10964           ->getMemOperand()
10965           ->setValue((Value *)nullptr);
10966       OutChains.push_back(Store);
10967     }
10968     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10969   }
10970 
10971   // All stores are grouped in one node to allow the matching between
10972   // the size of Ins and InVals. This only happens for vararg functions.
10973   if (!OutChains.empty()) {
10974     OutChains.push_back(Chain);
10975     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10976   }
10977 
10978   return Chain;
10979 }
10980 
10981 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10982 /// for tail call optimization.
10983 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10984 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10985     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10986     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10987 
10988   auto &Callee = CLI.Callee;
10989   auto CalleeCC = CLI.CallConv;
10990   auto &Outs = CLI.Outs;
10991   auto &Caller = MF.getFunction();
10992   auto CallerCC = Caller.getCallingConv();
10993 
10994   // Exception-handling functions need a special set of instructions to
10995   // indicate a return to the hardware. Tail-calling another function would
10996   // probably break this.
10997   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10998   // should be expanded as new function attributes are introduced.
10999   if (Caller.hasFnAttribute("interrupt"))
11000     return false;
11001 
11002   // Do not tail call opt if the stack is used to pass parameters.
11003   if (CCInfo.getNextStackOffset() != 0)
11004     return false;
11005 
11006   // Do not tail call opt if any parameters need to be passed indirectly.
11007   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
11008   // passed indirectly. So the address of the value will be passed in a
11009   // register, or if not available, then the address is put on the stack. In
11010   // order to pass indirectly, space on the stack often needs to be allocated
11011   // in order to store the value. In this case the CCInfo.getNextStackOffset()
11012   // != 0 check is not enough, and we also need to check whether any entry in
11013   // ArgLocs is assigned CCValAssign::Indirect.
11014   for (auto &VA : ArgLocs)
11015     if (VA.getLocInfo() == CCValAssign::Indirect)
11016       return false;
11017 
11018   // Do not tail call opt if either caller or callee uses struct return
11019   // semantics.
11020   auto IsCallerStructRet = Caller.hasStructRetAttr();
11021   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
11022   if (IsCallerStructRet || IsCalleeStructRet)
11023     return false;
11024 
11025   // Externally-defined functions with weak linkage should not be
11026   // tail-called. The behaviour of branch instructions in this situation (as
11027   // used for tail calls) is implementation-defined, so we cannot rely on the
11028   // linker replacing the tail call with a return.
11029   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
11030     const GlobalValue *GV = G->getGlobal();
11031     if (GV->hasExternalWeakLinkage())
11032       return false;
11033   }
11034 
11035   // The callee has to preserve all registers the caller needs to preserve.
11036   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
11037   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
11038   if (CalleeCC != CallerCC) {
11039     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
11040     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
11041       return false;
11042   }
11043 
11044   // Byval parameters hand the function a pointer directly into the stack area
11045   // we want to reuse during a tail call. Working around this *is* possible
11046   // but less efficient and uglier in LowerCall.
11047   for (auto &Arg : Outs)
11048     if (Arg.Flags.isByVal())
11049       return false;
11050 
11051   return true;
11052 }
11053 
11054 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
11055   return DAG.getDataLayout().getPrefTypeAlign(
11056       VT.getTypeForEVT(*DAG.getContext()));
11057 }
11058 
11059 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
11060 // and output parameter nodes.
11061 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
11062                                        SmallVectorImpl<SDValue> &InVals) const {
11063   SelectionDAG &DAG = CLI.DAG;
11064   SDLoc &DL = CLI.DL;
11065   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
11066   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
11067   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
11068   SDValue Chain = CLI.Chain;
11069   SDValue Callee = CLI.Callee;
11070   bool &IsTailCall = CLI.IsTailCall;
11071   CallingConv::ID CallConv = CLI.CallConv;
11072   bool IsVarArg = CLI.IsVarArg;
11073   EVT PtrVT = getPointerTy(DAG.getDataLayout());
11074   MVT XLenVT = Subtarget.getXLenVT();
11075 
11076   MachineFunction &MF = DAG.getMachineFunction();
11077 
11078   // Analyze the operands of the call, assigning locations to each operand.
11079   SmallVector<CCValAssign, 16> ArgLocs;
11080   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
11081 
11082   if (CallConv == CallingConv::GHC)
11083     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
11084   else
11085     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
11086                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
11087                                                     : CC_RISCV);
11088 
11089   // Check if it's really possible to do a tail call.
11090   if (IsTailCall)
11091     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
11092 
11093   if (IsTailCall)
11094     ++NumTailCalls;
11095   else if (CLI.CB && CLI.CB->isMustTailCall())
11096     report_fatal_error("failed to perform tail call elimination on a call "
11097                        "site marked musttail");
11098 
11099   // Get a count of how many bytes are to be pushed on the stack.
11100   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
11101 
11102   // Create local copies for byval args
11103   SmallVector<SDValue, 8> ByValArgs;
11104   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
11105     ISD::ArgFlagsTy Flags = Outs[i].Flags;
11106     if (!Flags.isByVal())
11107       continue;
11108 
11109     SDValue Arg = OutVals[i];
11110     unsigned Size = Flags.getByValSize();
11111     Align Alignment = Flags.getNonZeroByValAlign();
11112 
11113     int FI =
11114         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
11115     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
11116     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
11117 
11118     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
11119                           /*IsVolatile=*/false,
11120                           /*AlwaysInline=*/false, IsTailCall,
11121                           MachinePointerInfo(), MachinePointerInfo());
11122     ByValArgs.push_back(FIPtr);
11123   }
11124 
11125   if (!IsTailCall)
11126     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
11127 
11128   // Copy argument values to their designated locations.
11129   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
11130   SmallVector<SDValue, 8> MemOpChains;
11131   SDValue StackPtr;
11132   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
11133     CCValAssign &VA = ArgLocs[i];
11134     SDValue ArgValue = OutVals[i];
11135     ISD::ArgFlagsTy Flags = Outs[i].Flags;
11136 
11137     // Handle passing f64 on RV32D with a soft float ABI as a special case.
11138     bool IsF64OnRV32DSoftABI =
11139         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
11140     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
11141       SDValue SplitF64 = DAG.getNode(
11142           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
11143       SDValue Lo = SplitF64.getValue(0);
11144       SDValue Hi = SplitF64.getValue(1);
11145 
11146       Register RegLo = VA.getLocReg();
11147       RegsToPass.push_back(std::make_pair(RegLo, Lo));
11148 
11149       if (RegLo == RISCV::X17) {
11150         // Second half of f64 is passed on the stack.
11151         // Work out the address of the stack slot.
11152         if (!StackPtr.getNode())
11153           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
11154         // Emit the store.
11155         MemOpChains.push_back(
11156             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
11157       } else {
11158         // Second half of f64 is passed in another GPR.
11159         assert(RegLo < RISCV::X31 && "Invalid register pair");
11160         Register RegHigh = RegLo + 1;
11161         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
11162       }
11163       continue;
11164     }
11165 
11166     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
11167     // as any other MemLoc.
11168 
11169     // Promote the value if needed.
11170     // For now, only handle fully promoted and indirect arguments.
11171     if (VA.getLocInfo() == CCValAssign::Indirect) {
11172       // Store the argument in a stack slot and pass its address.
11173       Align StackAlign =
11174           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
11175                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
11176       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
11177       // If the original argument was split (e.g. i128), we need
11178       // to store the required parts of it here (and pass just one address).
11179       // Vectors may be partly split to registers and partly to the stack, in
11180       // which case the base address is partly offset and subsequent stores are
11181       // relative to that.
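            // E.g. (illustrative): an i128 argument on RV32 is split into
            // four i32 parts; all four are stored to one stack temporary
            // below and only its address is passed on.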
11182       unsigned ArgIndex = Outs[i].OrigArgIndex;
11183       unsigned ArgPartOffset = Outs[i].PartOffset;
11184       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
11185       // Calculate the total size to store. The only way to know which parts
11186       // are actually being stored is to perform the loop below and collect
11187       // the info as we go.
11188       SmallVector<std::pair<SDValue, SDValue>> Parts;
11189       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
11190         SDValue PartValue = OutVals[i + 1];
11191         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
11192         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
11193         EVT PartVT = PartValue.getValueType();
11194         if (PartVT.isScalableVector())
11195           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
11196         StoredSize += PartVT.getStoreSize();
11197         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
11198         Parts.push_back(std::make_pair(PartValue, Offset));
11199         ++i;
11200       }
11201       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
11202       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
11203       MemOpChains.push_back(
11204           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
11205                        MachinePointerInfo::getFixedStack(MF, FI)));
11206       for (const auto &Part : Parts) {
11207         SDValue PartValue = Part.first;
11208         SDValue PartOffset = Part.second;
11209         SDValue Address =
11210             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
11211         MemOpChains.push_back(
11212             DAG.getStore(Chain, DL, PartValue, Address,
11213                          MachinePointerInfo::getFixedStack(MF, FI)));
11214       }
11215       ArgValue = SpillSlot;
11216     } else {
11217       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
11218     }
11219 
11220     // Use local copy if it is a byval arg.
11221     if (Flags.isByVal())
11222       ArgValue = ByValArgs[j++];
11223 
11224     if (VA.isRegLoc()) {
11225       // Queue up the argument copies and emit them at the end.
11226       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
11227     } else {
11228       assert(VA.isMemLoc() && "Argument not register or memory");
11229       assert(!IsTailCall && "Tail call not allowed if stack is used "
11230                             "for passing parameters");
11231 
11232       // Work out the address of the stack slot.
11233       if (!StackPtr.getNode())
11234         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
11235       SDValue Address =
11236           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
11237                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
11238 
11239       // Emit the store.
11240       MemOpChains.push_back(
11241           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
11242     }
11243   }
11244 
11245   // Join the stores, which are independent of one another.
11246   if (!MemOpChains.empty())
11247     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
11248 
11249   SDValue Glue;
11250 
11251   // Build a sequence of copy-to-reg nodes, chained and glued together.
11252   for (auto &Reg : RegsToPass) {
11253     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
11254     Glue = Chain.getValue(1);
11255   }
11256 
11257   // Validate that none of the argument registers have been marked as
11258   // reserved; if one has, report an error. Do the same for the return address
11259   // if this is not a tail call.
11260   validateCCReservedRegs(RegsToPass, MF);
11261   if (!IsTailCall &&
11262       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
11263     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11264         MF.getFunction(),
11265         "Return address register required, but has been reserved."});
11266 
11267   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
11268   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
11269   // split it and then direct call can be matched by PseudoCALL.
11270   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
11271     const GlobalValue *GV = S->getGlobal();
11272 
11273     unsigned OpFlags = RISCVII::MO_CALL;
11274     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
11275       OpFlags = RISCVII::MO_PLT;
11276 
11277     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
11278   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
11279     unsigned OpFlags = RISCVII::MO_CALL;
11280 
11281     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
11282                                                  nullptr))
11283       OpFlags = RISCVII::MO_PLT;
11284 
11285     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
11286   }
11287 
11288   // The first call operand is the chain and the second is the target address.
11289   SmallVector<SDValue, 8> Ops;
11290   Ops.push_back(Chain);
11291   Ops.push_back(Callee);
11292 
11293   // Add argument registers to the end of the list so that they are
11294   // known live into the call.
11295   for (auto &Reg : RegsToPass)
11296     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
11297 
11298   if (!IsTailCall) {
11299     // Add a register mask operand representing the call-preserved registers.
11300     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
11301     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
11302     assert(Mask && "Missing call preserved mask for calling convention");
11303     Ops.push_back(DAG.getRegisterMask(Mask));
11304   }
11305 
11306   // Glue the call to the argument copies, if any.
11307   if (Glue.getNode())
11308     Ops.push_back(Glue);
11309 
11310   // Emit the call.
11311   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
11312 
11313   if (IsTailCall) {
11314     MF.getFrameInfo().setHasTailCall();
11315     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
11316   }
11317 
11318   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
11319   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
11320   Glue = Chain.getValue(1);
11321 
11322   // Mark the end of the call, which is glued to the call itself.
11323   Chain = DAG.getCALLSEQ_END(Chain,
11324                              DAG.getConstant(NumBytes, DL, PtrVT, true),
11325                              DAG.getConstant(0, DL, PtrVT, true),
11326                              Glue, DL);
11327   Glue = Chain.getValue(1);
11328 
11329   // Assign locations to each value returned by this call.
11330   SmallVector<CCValAssign, 16> RVLocs;
11331   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
11332   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
11333 
11334   // Copy all of the result registers out of their specified physreg.
11335   for (auto &VA : RVLocs) {
11336     // Copy the value out
11337     SDValue RetValue =
11338         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
11339     // Glue the RetValue to the end of the call sequence
11340     Chain = RetValue.getValue(1);
11341     Glue = RetValue.getValue(2);
11342 
11343     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
11344       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
11345       SDValue RetValue2 =
11346           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
11347       Chain = RetValue2.getValue(1);
11348       Glue = RetValue2.getValue(2);
11349       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
11350                              RetValue2);
11351     }
11352 
11353     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
11354 
11355     InVals.push_back(RetValue);
11356   }
11357 
11358   return Chain;
11359 }
11360 
11361 bool RISCVTargetLowering::CanLowerReturn(
11362     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
11363     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
11364   SmallVector<CCValAssign, 16> RVLocs;
11365   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
11366 
11367   Optional<unsigned> FirstMaskArgument;
11368   if (Subtarget.hasVInstructions())
11369     FirstMaskArgument = preAssignMask(Outs);
11370 
11371   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
11372     MVT VT = Outs[i].VT;
11373     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
11374     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
11375     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
11376                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
11377                  *this, FirstMaskArgument))
11378       return false;
11379   }
11380   return true;
11381 }
11382 
11383 SDValue
11384 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
11385                                  bool IsVarArg,
11386                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
11387                                  const SmallVectorImpl<SDValue> &OutVals,
11388                                  const SDLoc &DL, SelectionDAG &DAG) const {
11389   const MachineFunction &MF = DAG.getMachineFunction();
11390   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11391 
11392   // Stores the assignment of the return value to a location.
11393   SmallVector<CCValAssign, 16> RVLocs;
11394 
11395   // Info about the registers and stack slot.
11396   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
11397                  *DAG.getContext());
11398 
11399   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
11400                     nullptr, CC_RISCV);
11401 
11402   if (CallConv == CallingConv::GHC && !RVLocs.empty())
11403     report_fatal_error("GHC functions return void only");
11404 
11405   SDValue Glue;
11406   SmallVector<SDValue, 4> RetOps(1, Chain);
11407 
11408   // Copy the result values into the output registers.
11409   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
11410     SDValue Val = OutVals[i];
11411     CCValAssign &VA = RVLocs[i];
11412     assert(VA.isRegLoc() && "Can only return in registers!");
11413 
11414     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
11415       // Handle returning f64 on RV32D with a soft float ABI.
11416       assert(VA.isRegLoc() && "Expected return via registers");
11417       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
11418                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
11419       SDValue Lo = SplitF64.getValue(0);
11420       SDValue Hi = SplitF64.getValue(1);
11421       Register RegLo = VA.getLocReg();
11422       assert(RegLo < RISCV::X31 && "Invalid register pair");
11423       Register RegHi = RegLo + 1;
11424 
11425       if (STI.isRegisterReservedByUser(RegLo) ||
11426           STI.isRegisterReservedByUser(RegHi))
11427         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11428             MF.getFunction(),
11429             "Return value register required, but has been reserved."});
11430 
11431       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
11432       Glue = Chain.getValue(1);
11433       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
11434       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
11435       Glue = Chain.getValue(1);
11436       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
11437     } else {
11438       // Handle a 'normal' return.
11439       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
11440       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
11441 
11442       if (STI.isRegisterReservedByUser(VA.getLocReg()))
11443         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11444             MF.getFunction(),
11445             "Return value register required, but has been reserved."});
11446 
11447       // Guarantee that all emitted copies are stuck together.
11448       Glue = Chain.getValue(1);
11449       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
11450     }
11451   }
11452 
11453   RetOps[0] = Chain; // Update chain.
11454 
11455   // Add the glue node if we have it.
  if (Glue.getNode())
    RetOps.push_back(Glue);
11459 
11460   unsigned RetOpc = RISCVISD::RET_FLAG;
11461   // Interrupt service routines use different return instructions.
11462   const Function &Func = DAG.getMachineFunction().getFunction();
11463   if (Func.hasFnAttribute("interrupt")) {
11464     if (!Func.getReturnType()->isVoidTy())
11465       report_fatal_error(
11466           "Functions with the interrupt attribute must have void return type!");
11467 
11468     MachineFunction &MF = DAG.getMachineFunction();
11469     StringRef Kind =
11470       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
11471 
11472     if (Kind == "user")
11473       RetOpc = RISCVISD::URET_FLAG;
11474     else if (Kind == "supervisor")
11475       RetOpc = RISCVISD::SRET_FLAG;
11476     else
11477       RetOpc = RISCVISD::MRET_FLAG;
11478   }
11479 
11480   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
11481 }
11482 
11483 void RISCVTargetLowering::validateCCReservedRegs(
11484     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11485     MachineFunction &MF) const {
11486   const Function &F = MF.getFunction();
11487   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11488 
11489   if (llvm::any_of(Regs, [&STI](auto Reg) {
11490         return STI.isRegisterReservedByUser(Reg.first);
11491       }))
11492     F.getContext().diagnose(DiagnosticInfoUnsupported{
11493         F, "Argument register required, but has been reserved."});
11494 }
11495 
11496 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11497   return CI->isTailCall();
11498 }
11499 
11500 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11501 #define NODE_NAME_CASE(NODE)                                                   \
11502   case RISCVISD::NODE:                                                         \
11503     return "RISCVISD::" #NODE;
11504   // clang-format off
11505   switch ((RISCVISD::NodeType)Opcode) {
11506   case RISCVISD::FIRST_NUMBER:
11507     break;
11508   NODE_NAME_CASE(RET_FLAG)
11509   NODE_NAME_CASE(URET_FLAG)
11510   NODE_NAME_CASE(SRET_FLAG)
11511   NODE_NAME_CASE(MRET_FLAG)
11512   NODE_NAME_CASE(CALL)
11513   NODE_NAME_CASE(SELECT_CC)
11514   NODE_NAME_CASE(BR_CC)
11515   NODE_NAME_CASE(BuildPairF64)
11516   NODE_NAME_CASE(SplitF64)
11517   NODE_NAME_CASE(TAIL)
11518   NODE_NAME_CASE(ADD_LO)
11519   NODE_NAME_CASE(HI)
11520   NODE_NAME_CASE(LLA)
11521   NODE_NAME_CASE(ADD_TPREL)
11522   NODE_NAME_CASE(LA)
11523   NODE_NAME_CASE(LA_TLS_IE)
11524   NODE_NAME_CASE(LA_TLS_GD)
11525   NODE_NAME_CASE(MULHSU)
11526   NODE_NAME_CASE(SLLW)
11527   NODE_NAME_CASE(SRAW)
11528   NODE_NAME_CASE(SRLW)
11529   NODE_NAME_CASE(DIVW)
11530   NODE_NAME_CASE(DIVUW)
11531   NODE_NAME_CASE(REMUW)
11532   NODE_NAME_CASE(ROLW)
11533   NODE_NAME_CASE(RORW)
11534   NODE_NAME_CASE(CLZW)
11535   NODE_NAME_CASE(CTZW)
11536   NODE_NAME_CASE(FSLW)
11537   NODE_NAME_CASE(FSRW)
11538   NODE_NAME_CASE(FSL)
11539   NODE_NAME_CASE(FSR)
11540   NODE_NAME_CASE(FMV_H_X)
11541   NODE_NAME_CASE(FMV_X_ANYEXTH)
11542   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11543   NODE_NAME_CASE(FMV_W_X_RV64)
11544   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11545   NODE_NAME_CASE(FCVT_X)
11546   NODE_NAME_CASE(FCVT_XU)
11547   NODE_NAME_CASE(FCVT_W_RV64)
11548   NODE_NAME_CASE(FCVT_WU_RV64)
11549   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11550   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11551   NODE_NAME_CASE(READ_CYCLE_WIDE)
11552   NODE_NAME_CASE(GREV)
11553   NODE_NAME_CASE(GREVW)
11554   NODE_NAME_CASE(GORC)
11555   NODE_NAME_CASE(GORCW)
11556   NODE_NAME_CASE(SHFL)
11557   NODE_NAME_CASE(SHFLW)
11558   NODE_NAME_CASE(UNSHFL)
11559   NODE_NAME_CASE(UNSHFLW)
11560   NODE_NAME_CASE(BFP)
11561   NODE_NAME_CASE(BFPW)
11562   NODE_NAME_CASE(BCOMPRESS)
11563   NODE_NAME_CASE(BCOMPRESSW)
11564   NODE_NAME_CASE(BDECOMPRESS)
11565   NODE_NAME_CASE(BDECOMPRESSW)
11566   NODE_NAME_CASE(VMV_V_X_VL)
11567   NODE_NAME_CASE(VFMV_V_F_VL)
11568   NODE_NAME_CASE(VMV_X_S)
11569   NODE_NAME_CASE(VMV_S_X_VL)
11570   NODE_NAME_CASE(VFMV_S_F_VL)
11571   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11572   NODE_NAME_CASE(READ_VLENB)
11573   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11574   NODE_NAME_CASE(VSLIDEUP_VL)
11575   NODE_NAME_CASE(VSLIDE1UP_VL)
11576   NODE_NAME_CASE(VSLIDEDOWN_VL)
11577   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11578   NODE_NAME_CASE(VID_VL)
11579   NODE_NAME_CASE(VFNCVT_ROD_VL)
11580   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11581   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11582   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11583   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11584   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11585   NODE_NAME_CASE(VECREDUCE_AND_VL)
11586   NODE_NAME_CASE(VECREDUCE_OR_VL)
11587   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11588   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11589   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11590   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11591   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11592   NODE_NAME_CASE(ADD_VL)
11593   NODE_NAME_CASE(AND_VL)
11594   NODE_NAME_CASE(MUL_VL)
11595   NODE_NAME_CASE(OR_VL)
11596   NODE_NAME_CASE(SDIV_VL)
11597   NODE_NAME_CASE(SHL_VL)
11598   NODE_NAME_CASE(SREM_VL)
11599   NODE_NAME_CASE(SRA_VL)
11600   NODE_NAME_CASE(SRL_VL)
11601   NODE_NAME_CASE(SUB_VL)
11602   NODE_NAME_CASE(UDIV_VL)
11603   NODE_NAME_CASE(UREM_VL)
11604   NODE_NAME_CASE(XOR_VL)
11605   NODE_NAME_CASE(SADDSAT_VL)
11606   NODE_NAME_CASE(UADDSAT_VL)
11607   NODE_NAME_CASE(SSUBSAT_VL)
11608   NODE_NAME_CASE(USUBSAT_VL)
11609   NODE_NAME_CASE(FADD_VL)
11610   NODE_NAME_CASE(FSUB_VL)
11611   NODE_NAME_CASE(FMUL_VL)
11612   NODE_NAME_CASE(FDIV_VL)
11613   NODE_NAME_CASE(FNEG_VL)
11614   NODE_NAME_CASE(FABS_VL)
11615   NODE_NAME_CASE(FSQRT_VL)
11616   NODE_NAME_CASE(VFMADD_VL)
11617   NODE_NAME_CASE(VFNMADD_VL)
11618   NODE_NAME_CASE(VFMSUB_VL)
11619   NODE_NAME_CASE(VFNMSUB_VL)
11620   NODE_NAME_CASE(FCOPYSIGN_VL)
11621   NODE_NAME_CASE(SMIN_VL)
11622   NODE_NAME_CASE(SMAX_VL)
11623   NODE_NAME_CASE(UMIN_VL)
11624   NODE_NAME_CASE(UMAX_VL)
11625   NODE_NAME_CASE(FMINNUM_VL)
11626   NODE_NAME_CASE(FMAXNUM_VL)
11627   NODE_NAME_CASE(MULHS_VL)
11628   NODE_NAME_CASE(MULHU_VL)
11629   NODE_NAME_CASE(FP_TO_SINT_VL)
11630   NODE_NAME_CASE(FP_TO_UINT_VL)
11631   NODE_NAME_CASE(SINT_TO_FP_VL)
11632   NODE_NAME_CASE(UINT_TO_FP_VL)
11633   NODE_NAME_CASE(FP_EXTEND_VL)
11634   NODE_NAME_CASE(FP_ROUND_VL)
11635   NODE_NAME_CASE(VWMUL_VL)
11636   NODE_NAME_CASE(VWMULU_VL)
11637   NODE_NAME_CASE(VWMULSU_VL)
11638   NODE_NAME_CASE(VWADD_VL)
11639   NODE_NAME_CASE(VWADDU_VL)
11640   NODE_NAME_CASE(VWSUB_VL)
11641   NODE_NAME_CASE(VWSUBU_VL)
11642   NODE_NAME_CASE(VWADD_W_VL)
11643   NODE_NAME_CASE(VWADDU_W_VL)
11644   NODE_NAME_CASE(VWSUB_W_VL)
11645   NODE_NAME_CASE(VWSUBU_W_VL)
11646   NODE_NAME_CASE(SETCC_VL)
11647   NODE_NAME_CASE(VSELECT_VL)
11648   NODE_NAME_CASE(VP_MERGE_VL)
11649   NODE_NAME_CASE(VMAND_VL)
11650   NODE_NAME_CASE(VMOR_VL)
11651   NODE_NAME_CASE(VMXOR_VL)
11652   NODE_NAME_CASE(VMCLR_VL)
11653   NODE_NAME_CASE(VMSET_VL)
11654   NODE_NAME_CASE(VRGATHER_VX_VL)
11655   NODE_NAME_CASE(VRGATHER_VV_VL)
11656   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11657   NODE_NAME_CASE(VSEXT_VL)
11658   NODE_NAME_CASE(VZEXT_VL)
11659   NODE_NAME_CASE(VCPOP_VL)
11660   NODE_NAME_CASE(READ_CSR)
11661   NODE_NAME_CASE(WRITE_CSR)
11662   NODE_NAME_CASE(SWAP_CSR)
11663   }
11664   // clang-format on
11665   return nullptr;
11666 #undef NODE_NAME_CASE
11667 }
11668 
11669 /// getConstraintType - Given a constraint letter, return the type of
11670 /// constraint it is for this target.
11671 RISCVTargetLowering::ConstraintType
11672 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11673   if (Constraint.size() == 1) {
11674     switch (Constraint[0]) {
11675     default:
11676       break;
11677     case 'f':
11678       return C_RegisterClass;
    case 'I': // A 12-bit signed immediate
    case 'J': // An integer zero
    case 'K': // A 5-bit unsigned immediate
      return C_Immediate;
    case 'A': // An address held in a general-purpose register
      return C_Memory;
11685     case 'S': // A symbolic address
11686       return C_Other;
11687     }
11688   } else {
11689     if (Constraint == "vr" || Constraint == "vm")
11690       return C_RegisterClass;
11691   }
11692   return TargetLowering::getConstraintType(Constraint);
11693 }
11694 
11695 std::pair<unsigned, const TargetRegisterClass *>
11696 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11697                                                   StringRef Constraint,
11698                                                   MVT VT) const {
11699   // First, see if this is a constraint that directly corresponds to a
11700   // RISCV register class.
11701   if (Constraint.size() == 1) {
11702     switch (Constraint[0]) {
11703     case 'r':
11704       // TODO: Support fixed vectors up to XLen for P extension?
11705       if (VT.isVector())
11706         break;
11707       return std::make_pair(0U, &RISCV::GPRRegClass);
11708     case 'f':
11709       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11710         return std::make_pair(0U, &RISCV::FPR16RegClass);
11711       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11712         return std::make_pair(0U, &RISCV::FPR32RegClass);
11713       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11714         return std::make_pair(0U, &RISCV::FPR64RegClass);
11715       break;
11716     default:
11717       break;
11718     }
11719   } else if (Constraint == "vr") {
11720     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11721                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11722       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11723         return std::make_pair(0U, RC);
11724     }
11725   } else if (Constraint == "vm") {
11726     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11727       return std::make_pair(0U, &RISCV::VMV0RegClass);
11728   }
11729 
11730   // Clang will correctly decode the usage of register name aliases into their
11731   // official names. However, other frontends like `rustc` do not. This allows
11732   // users of these frontends to use the ABI names for registers in LLVM-style
11733   // register constraints.
11734   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11735                                .Case("{zero}", RISCV::X0)
11736                                .Case("{ra}", RISCV::X1)
11737                                .Case("{sp}", RISCV::X2)
11738                                .Case("{gp}", RISCV::X3)
11739                                .Case("{tp}", RISCV::X4)
11740                                .Case("{t0}", RISCV::X5)
11741                                .Case("{t1}", RISCV::X6)
11742                                .Case("{t2}", RISCV::X7)
11743                                .Cases("{s0}", "{fp}", RISCV::X8)
11744                                .Case("{s1}", RISCV::X9)
11745                                .Case("{a0}", RISCV::X10)
11746                                .Case("{a1}", RISCV::X11)
11747                                .Case("{a2}", RISCV::X12)
11748                                .Case("{a3}", RISCV::X13)
11749                                .Case("{a4}", RISCV::X14)
11750                                .Case("{a5}", RISCV::X15)
11751                                .Case("{a6}", RISCV::X16)
11752                                .Case("{a7}", RISCV::X17)
11753                                .Case("{s2}", RISCV::X18)
11754                                .Case("{s3}", RISCV::X19)
11755                                .Case("{s4}", RISCV::X20)
11756                                .Case("{s5}", RISCV::X21)
11757                                .Case("{s6}", RISCV::X22)
11758                                .Case("{s7}", RISCV::X23)
11759                                .Case("{s8}", RISCV::X24)
11760                                .Case("{s9}", RISCV::X25)
11761                                .Case("{s10}", RISCV::X26)
11762                                .Case("{s11}", RISCV::X27)
11763                                .Case("{t3}", RISCV::X28)
11764                                .Case("{t4}", RISCV::X29)
11765                                .Case("{t5}", RISCV::X30)
11766                                .Case("{t6}", RISCV::X31)
11767                                .Default(RISCV::NoRegister);
11768   if (XRegFromAlias != RISCV::NoRegister)
11769     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11770 
11771   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11772   // TableGen record rather than the AsmName to choose registers for InlineAsm
11773   // constraints, plus we want to match those names to the widest floating point
11774   // register type available, manually select floating point registers here.
11775   //
11776   // The second case is the ABI name of the register, so that frontends can also
11777   // use the ABI names in register constraint lists.
11778   if (Subtarget.hasStdExtF()) {
11779     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11780                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11781                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11782                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11783                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11784                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11785                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11786                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11787                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11788                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11789                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11790                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11791                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11792                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11793                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11794                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11795                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11796                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11797                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11798                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11799                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11800                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11801                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11802                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11803                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11804                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11805                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11806                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11807                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11808                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11809                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11810                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11811                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11812                         .Default(RISCV::NoRegister);
11813     if (FReg != RISCV::NoRegister) {
11814       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11815       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11816         unsigned RegNo = FReg - RISCV::F0_F;
11817         unsigned DReg = RISCV::F0_D + RegNo;
11818         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11819       }
11820       if (VT == MVT::f32 || VT == MVT::Other)
11821         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11822       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11823         unsigned RegNo = FReg - RISCV::F0_F;
11824         unsigned HReg = RISCV::F0_H + RegNo;
11825         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11826       }
11827     }
11828   }
11829 
11830   if (Subtarget.hasVInstructions()) {
11831     Register VReg = StringSwitch<Register>(Constraint.lower())
11832                         .Case("{v0}", RISCV::V0)
11833                         .Case("{v1}", RISCV::V1)
11834                         .Case("{v2}", RISCV::V2)
11835                         .Case("{v3}", RISCV::V3)
11836                         .Case("{v4}", RISCV::V4)
11837                         .Case("{v5}", RISCV::V5)
11838                         .Case("{v6}", RISCV::V6)
11839                         .Case("{v7}", RISCV::V7)
11840                         .Case("{v8}", RISCV::V8)
11841                         .Case("{v9}", RISCV::V9)
11842                         .Case("{v10}", RISCV::V10)
11843                         .Case("{v11}", RISCV::V11)
11844                         .Case("{v12}", RISCV::V12)
11845                         .Case("{v13}", RISCV::V13)
11846                         .Case("{v14}", RISCV::V14)
11847                         .Case("{v15}", RISCV::V15)
11848                         .Case("{v16}", RISCV::V16)
11849                         .Case("{v17}", RISCV::V17)
11850                         .Case("{v18}", RISCV::V18)
11851                         .Case("{v19}", RISCV::V19)
11852                         .Case("{v20}", RISCV::V20)
11853                         .Case("{v21}", RISCV::V21)
11854                         .Case("{v22}", RISCV::V22)
11855                         .Case("{v23}", RISCV::V23)
11856                         .Case("{v24}", RISCV::V24)
11857                         .Case("{v25}", RISCV::V25)
11858                         .Case("{v26}", RISCV::V26)
11859                         .Case("{v27}", RISCV::V27)
11860                         .Case("{v28}", RISCV::V28)
11861                         .Case("{v29}", RISCV::V29)
11862                         .Case("{v30}", RISCV::V30)
11863                         .Case("{v31}", RISCV::V31)
11864                         .Default(RISCV::NoRegister);
11865     if (VReg != RISCV::NoRegister) {
11866       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11867         return std::make_pair(VReg, &RISCV::VMRegClass);
11868       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11869         return std::make_pair(VReg, &RISCV::VRRegClass);
11870       for (const auto *RC :
11871            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11872         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11873           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11874           return std::make_pair(VReg, RC);
11875         }
11876       }
11877     }
11878   }
11879 
11880   std::pair<Register, const TargetRegisterClass *> Res =
11881       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11882 
11883   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11884   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11885   // Subtarget into account.
11886   if (Res.second == &RISCV::GPRF16RegClass ||
11887       Res.second == &RISCV::GPRF32RegClass ||
11888       Res.second == &RISCV::GPRF64RegClass)
11889     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11890 
11891   return Res;
11892 }
11893 
11894 unsigned
11895 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently, only single-character constraints are supported.
11897   if (ConstraintCode.size() == 1) {
11898     switch (ConstraintCode[0]) {
11899     case 'A':
11900       return InlineAsm::Constraint_A;
11901     default:
11902       break;
11903     }
11904   }
11905 
11906   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11907 }
11908 
11909 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11910     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11911     SelectionDAG &DAG) const {
  // Currently, only single-character constraints are supported.
11913   if (Constraint.length() == 1) {
11914     switch (Constraint[0]) {
11915     case 'I':
11916       // Validate & create a 12-bit signed immediate operand.
11917       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11918         uint64_t CVal = C->getSExtValue();
11919         if (isInt<12>(CVal))
11920           Ops.push_back(
11921               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11922       }
11923       return;
11924     case 'J':
11925       // Validate & create an integer zero operand.
11926       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11927         if (C->getZExtValue() == 0)
11928           Ops.push_back(
11929               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11930       return;
11931     case 'K':
11932       // Validate & create a 5-bit unsigned immediate operand.
11933       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11934         uint64_t CVal = C->getZExtValue();
11935         if (isUInt<5>(CVal))
11936           Ops.push_back(
11937               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11938       }
11939       return;
11940     case 'S':
11941       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11942         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11943                                                  GA->getValueType(0)));
11944       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11945         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11946                                                 BA->getValueType(0)));
11947       }
11948       return;
11949     default:
11950       break;
11951     }
11952   }
11953   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11954 }
11955 
11956 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11957                                                    Instruction *Inst,
11958                                                    AtomicOrdering Ord) const {
11959   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11960     return Builder.CreateFence(Ord);
11961   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11962     return Builder.CreateFence(AtomicOrdering::Release);
11963   return nullptr;
11964 }
11965 
11966 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11967                                                     Instruction *Inst,
11968                                                     AtomicOrdering Ord) const {
11969   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11970     return Builder.CreateFence(AtomicOrdering::Acquire);
11971   return nullptr;
11972 }
11973 
11974 TargetLowering::AtomicExpansionKind
11975 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11976   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11977   // point operations can't be used in an lr/sc sequence without breaking the
11978   // forward-progress guarantee.
11979   if (AI->isFloatingPointOperation())
11980     return AtomicExpansionKind::CmpXChg;
11981 
11982   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
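  // The A extension provides no sub-word AMOs, so 8- and 16-bit operations
  // are expanded to a masked LR/SC sequence on the containing aligned word.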
11983   if (Size == 8 || Size == 16)
11984     return AtomicExpansionKind::MaskedIntrinsic;
11985   return AtomicExpansionKind::None;
11986 }
11987 
11988 static Intrinsic::ID
11989 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11990   if (XLen == 32) {
11991     switch (BinOp) {
11992     default:
11993       llvm_unreachable("Unexpected AtomicRMW BinOp");
11994     case AtomicRMWInst::Xchg:
11995       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11996     case AtomicRMWInst::Add:
11997       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11998     case AtomicRMWInst::Sub:
11999       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
12000     case AtomicRMWInst::Nand:
12001       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
12002     case AtomicRMWInst::Max:
12003       return Intrinsic::riscv_masked_atomicrmw_max_i32;
12004     case AtomicRMWInst::Min:
12005       return Intrinsic::riscv_masked_atomicrmw_min_i32;
12006     case AtomicRMWInst::UMax:
12007       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
12008     case AtomicRMWInst::UMin:
12009       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
12010     }
12011   }
12012 
12013   if (XLen == 64) {
12014     switch (BinOp) {
12015     default:
12016       llvm_unreachable("Unexpected AtomicRMW BinOp");
12017     case AtomicRMWInst::Xchg:
12018       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
12019     case AtomicRMWInst::Add:
12020       return Intrinsic::riscv_masked_atomicrmw_add_i64;
12021     case AtomicRMWInst::Sub:
12022       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
12023     case AtomicRMWInst::Nand:
12024       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
12025     case AtomicRMWInst::Max:
12026       return Intrinsic::riscv_masked_atomicrmw_max_i64;
12027     case AtomicRMWInst::Min:
12028       return Intrinsic::riscv_masked_atomicrmw_min_i64;
12029     case AtomicRMWInst::UMax:
12030       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
12031     case AtomicRMWInst::UMin:
12032       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
12033     }
12034   }
12035 
12036   llvm_unreachable("Unexpected XLen\n");
12037 }
12038 
12039 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
12040     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
12041     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
12042   unsigned XLen = Subtarget.getXLen();
12043   Value *Ordering =
12044       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
12045   Type *Tys[] = {AlignedAddr->getType()};
12046   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
12047       AI->getModule(),
12048       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
12049 
12050   if (XLen == 64) {
12051     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
12052     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
12053     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
12054   }
12055 
12056   Value *Result;
12057 
12058   // Must pass the shift amount needed to sign extend the loaded value prior
12059   // to performing a signed comparison for min/max. ShiftAmt is the number of
12060   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
12061   // is the number of bits to left+right shift the value in order to
12062   // sign-extend.
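  // For example, on RV32 an i8 operation at byte offset 1 has ShiftAmt = 8
  // and ValWidth = 8, so SextShamt = 32 - 8 - 8 = 16: shifting left and then
  // arithmetically right by 16 sign-extends the loaded byte in place.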
12063   if (AI->getOperation() == AtomicRMWInst::Min ||
12064       AI->getOperation() == AtomicRMWInst::Max) {
12065     const DataLayout &DL = AI->getModule()->getDataLayout();
12066     unsigned ValWidth =
12067         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
12068     Value *SextShamt =
12069         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
12070     Result = Builder.CreateCall(LrwOpScwLoop,
12071                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
12072   } else {
12073     Result =
12074         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
12075   }
12076 
12077   if (XLen == 64)
12078     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
12079   return Result;
12080 }
12081 
12082 TargetLowering::AtomicExpansionKind
12083 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
12084     AtomicCmpXchgInst *CI) const {
12085   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
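  // As with atomicrmw, sub-word compare-and-swap has no native instruction
  // and is expanded to a masked LR/SC sequence on the containing aligned word.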
12086   if (Size == 8 || Size == 16)
12087     return AtomicExpansionKind::MaskedIntrinsic;
12088   return AtomicExpansionKind::None;
12089 }
12090 
12091 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
12092     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
12093     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
12094   unsigned XLen = Subtarget.getXLen();
12095   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
12096   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
12097   if (XLen == 64) {
12098     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
12099     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
12100     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
12101     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
12102   }
12103   Type *Tys[] = {AlignedAddr->getType()};
12104   Function *MaskedCmpXchg =
12105       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
12106   Value *Result = Builder.CreateCall(
12107       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
12108   if (XLen == 64)
12109     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
12110   return Result;
12111 }
12112 
12113 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
12114                                                         EVT DataVT) const {
12115   return false;
12116 }
12117 
12118 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
12119                                                EVT VT) const {
12120   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
12121     return false;
12122 
12123   switch (FPVT.getSimpleVT().SimpleTy) {
12124   case MVT::f16:
12125     return Subtarget.hasStdExtZfh();
12126   case MVT::f32:
12127     return Subtarget.hasStdExtF();
12128   case MVT::f64:
12129     return Subtarget.hasStdExtD();
12130   default:
12131     return false;
12132   }
12133 }
12134 
12135 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
  // If we are using the small code model, we can reduce the size of each jump
  // table entry to 4 bytes.
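  // Each EK_Custom32 entry is then the 4-byte absolute address of the
  // destination basic block, produced by LowerCustomJumpTableEntry below.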
12138   if (Subtarget.is64Bit() && !isPositionIndependent() &&
12139       getTargetMachine().getCodeModel() == CodeModel::Small) {
12140     return MachineJumpTableInfo::EK_Custom32;
12141   }
12142   return TargetLowering::getJumpTableEncoding();
12143 }
12144 
12145 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
12146     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
12147     unsigned uid, MCContext &Ctx) const {
12148   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
12149          getTargetMachine().getCodeModel() == CodeModel::Small);
12150   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
12151 }
12152 
12153 bool RISCVTargetLowering::isVScaleKnownToBeAPowerOfTwo() const {
12154   // We define vscale to be VLEN/RVVBitsPerBlock.  VLEN is always a power
12155   // of two >= 64, and RVVBitsPerBlock is 64.  Thus, vscale must be
12156   // a power of two as well.
12157   // FIXME: This doesn't work for zve32, but that's already broken
12158   // elsewhere for the same reason.
12159   assert(Subtarget.getRealMinVLen() >= 64 && "zve32* unsupported");
  assert(RISCV::RVVBitsPerBlock == 64 &&
         "RVVBitsPerBlock changed, audit needed");
12161   return true;
12162 }
12163 
12164 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
12165                                                      EVT VT) const {
12166   VT = VT.getScalarType();
12167 
12168   if (!VT.isSimple())
12169     return false;
12170 
12171   switch (VT.getSimpleVT().SimpleTy) {
12172   case MVT::f16:
12173     return Subtarget.hasStdExtZfh();
12174   case MVT::f32:
12175     return Subtarget.hasStdExtF();
12176   case MVT::f64:
12177     return Subtarget.hasStdExtD();
12178   default:
12179     break;
12180   }
12181 
12182   return false;
12183 }
12184 
12185 Register RISCVTargetLowering::getExceptionPointerRegister(
12186     const Constant *PersonalityFn) const {
12187   return RISCV::X10;
12188 }
12189 
12190 Register RISCVTargetLowering::getExceptionSelectorRegister(
12191     const Constant *PersonalityFn) const {
12192   return RISCV::X11;
12193 }
12194 
12195 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is of f32 type under the LP64 ABI.
12198   RISCVABI::ABI ABI = Subtarget.getTargetABI();
12199   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
12200     return false;
12201 
12202   return true;
12203 }
12204 
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
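  // Per the RISC-V psABI, 32-bit values are passed and returned sign-extended
  // to XLen on RV64, even when the corresponding C type is unsigned.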
12206   if (Subtarget.is64Bit() && Type == MVT::i32)
12207     return true;
12208 
12209   return IsSigned;
12210 }
12211 
12212 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
12213                                                  SDValue C) const {
12214   // Check integral scalar types.
12215   if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
12218     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
12219       return false;
12220     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
12221       // Break the MUL to a SLLI and an ADD/SUB.
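      // For example, x * 5 == (x << 2) + x and x * 7 == (x << 3) - x.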
12222       const APInt &Imm = ConstNode->getAPIntValue();
12223       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
12224           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
12225         return true;
12226       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
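      // For example, x * 4098 == sh1add(x, x << 12), since 4098 == 2 + 4096.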
12227       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
12228           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
12229            (Imm - 8).isPowerOf2()))
12230         return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
12233       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
12234         return false;
12235       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
12236       // a pair of LUI/ADDI.
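      // For example, x * 6144 == ((x << 1) + x) << 11, since 6144 == 3 << 11.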
12237       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
12238         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
12239         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
12240             (1 - ImmS).isPowerOf2())
12241           return true;
12242       }
12243     }
12244   }
12245 
12246   return false;
12247 }
12248 
12249 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
12250                                                       SDValue ConstNode) const {
12251   // Let the DAGCombiner decide for vectors.
12252   EVT VT = AddNode.getValueType();
12253   if (VT.isVector())
12254     return true;
12255 
12256   // Let the DAGCombiner decide for larger types.
12257   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
12258     return true;
12259 
  // Folding (mul (add X, C1), C2) into (add (mul X, C2), C1 * C2) is worse if
  // C1 fits in a simm12 while C1 * C2 does not.
12261   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
12262   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
12263   const APInt &C1 = C1Node->getAPIntValue();
12264   const APInt &C2 = C2Node->getAPIntValue();
12265   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
12266     return false;
12267 
12268   // Default to true and let the DAGCombiner decide.
12269   return true;
12270 }
12271 
12272 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
12273     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
12274     bool *Fast) const {
12275   if (!VT.isVector()) {
12276     if (Fast)
12277       *Fast = false;
12278     return Subtarget.enableUnalignedScalarMem();
12279   }
12280 
  // All vector implementations must support element-aligned accesses.
12282   EVT ElemVT = VT.getVectorElementType();
12283   if (Alignment >= ElemVT.getStoreSize()) {
12284     if (Fast)
12285       *Fast = true;
12286     return true;
12287   }
12288 
12289   return false;
12290 }
12291 
12292 bool RISCVTargetLowering::splitValueIntoRegisterParts(
12293     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
12294     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
12295   bool IsABIRegCopy = CC.has_value();
12296   EVT ValueVT = Val.getValueType();
12297   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // form a NaN-boxed f32, and cast to f32.
12300     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
12301     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
12302     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
12303                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
12304     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
12305     Parts[0] = Val;
12306     return true;
12307   }
12308 
12309   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
12310     LLVMContext &Context = *DAG.getContext();
12311     EVT ValueEltVT = ValueVT.getVectorElementType();
12312     EVT PartEltVT = PartVT.getVectorElementType();
12313     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
12314     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
12315     if (PartVTBitSize % ValueVTBitSize == 0) {
12316       assert(PartVTBitSize >= ValueVTBitSize);
      // If the element types differ, first widen the value to a vector with
      // the same element type whose size matches PartVT, then bitcast to
      // PartVT.
      // For example, to copy a <vscale x 1 x i8> value into
      // <vscale x 4 x i16>, widen <vscale x 1 x i8> to <vscale x 8 x i8> via
      // INSERT_SUBVECTOR, then bitcast to <vscale x 4 x i16>.
12323       if (ValueEltVT != PartEltVT) {
12324         if (PartVTBitSize > ValueVTBitSize) {
12325           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
          assert(Count != 0 && "The number of elements should not be zero.");
12327           EVT SameEltTypeVT =
12328               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
12329           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
12330                             DAG.getUNDEF(SameEltTypeVT), Val,
12331                             DAG.getVectorIdxConstant(0, DL));
12332         }
12333         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
12334       } else {
12335         Val =
12336             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
12337                         Val, DAG.getVectorIdxConstant(0, DL));
12338       }
12339       Parts[0] = Val;
12340       return true;
12341     }
12342   }
12343   return false;
12344 }
12345 
12346 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
12347     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
12348     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
12349   bool IsABIRegCopy = CC.has_value();
12350   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
12351     SDValue Val = Parts[0];
12352 
12353     // Cast the f32 to i32, truncate to i16, and cast back to f16.
12354     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
12355     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
12356     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
12357     return Val;
12358   }
12359 
12360   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
12361     LLVMContext &Context = *DAG.getContext();
12362     SDValue Val = Parts[0];
12363     EVT ValueEltVT = ValueVT.getVectorElementType();
12364     EVT PartEltVT = PartVT.getVectorElementType();
12365     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
12366     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
12367     if (PartVTBitSize % ValueVTBitSize == 0) {
12368       assert(PartVTBitSize >= ValueVTBitSize);
12369       EVT SameEltTypeVT = ValueVT;
      // If the element types differ, first bitcast to a vector with the same
      // element type as ValueVT, then extract the subvector.
      // For example, to extract a <vscale x 1 x i8> value from
      // <vscale x 4 x i16>, bitcast <vscale x 4 x i16> to <vscale x 8 x i8>
      // first, then extract <vscale x 1 x i8>.
12376       if (ValueEltVT != PartEltVT) {
12377         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
12379         SameEltTypeVT =
12380             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
12381         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
12382       }
12383       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
12384                         DAG.getVectorIdxConstant(0, DL));
12385       return Val;
12386     }
12387   }
12388   return SDValue();
12389 }
12390 
12391 SDValue
12392 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
12393                                    SelectionDAG &DAG,
12394                                    SmallVectorImpl<SDNode *> &Created) const {
12395   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
12396   if (isIntDivCheap(N->getValueType(0), Attr))
12397     return SDValue(N, 0); // Lower SDIV as SDIV
12398 
12399   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
12400          "Unexpected divisor!");
12401 
  // A conditional move is needed, so only do the transformation if Zbt is
  // enabled.
12403   if (!Subtarget.hasStdExtZbt())
12404     return SDValue();
12405 
  // When |Divisor| >= 2^12 the transformation is not profitable, and dividing
  // by 2 would generate more critical-path instructions, so keep the original
  // DAG in those cases.
12409   unsigned Lg2 = Divisor.countTrailingZeros();
12410   if (Lg2 == 1 || Lg2 >= 12)
12411     return SDValue();
12412 
12413   // fold (sdiv X, pow2)
12414   EVT VT = N->getValueType(0);
12415   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
12416     return SDValue();
12417 
12418   SDLoc DL(N);
12419   SDValue N0 = N->getOperand(0);
12420   SDValue Zero = DAG.getConstant(0, DL, VT);
12421   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
12422 
12423   // Add (N0 < 0) ? Pow2 - 1 : 0;
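  // This computes (N0 + (N0 < 0 ? Pow2 - 1 : 0)) >> Lg2, rounding the
  // quotient toward zero; e.g. -7 / 4 becomes (-7 + 3) >> 2 == -1.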
12424   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
12425   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
12426   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
12427 
12428   Created.push_back(Cmp.getNode());
12429   Created.push_back(Add.getNode());
12430   Created.push_back(Sel.getNode());
12431 
12432   // Divide by pow2.
12433   SDValue SRA =
12434       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
12435 
12436   // If we're dividing by a positive value, we're done.  Otherwise, we must
12437   // negate the result.
12438   if (Divisor.isNonNegative())
12439     return SRA;
12440 
12441   Created.push_back(SRA.getNode());
12442   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
12443 }
12444 
12445 #define GET_REGISTER_MATCHER
12446 #include "RISCVGenAsmMatcher.inc"
12447 
12448 Register
12449 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
12450                                        const MachineFunction &MF) const {
12451   Register Reg = MatchRegisterAltName(RegName);
12452   if (Reg == RISCV::NoRegister)
12453     Reg = MatchRegisterName(RegName);
12454   if (Reg == RISCV::NoRegister)
12455     report_fatal_error(
12456         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
12457   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
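  // Only registers that are reserved, either by the target or explicitly by
  // the user, may be accessed by name; anything else could be clobbered by
  // codegen.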
12458   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
12459     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
12460                              StringRef(RegName) + "\"."));
12461   return Reg;
12462 }
12463 
12464 namespace llvm {
12465 namespace RISCVVIntrinsicsTable {
12466 
12467 #define GET_RISCVVIntrinsicsTable_IMPL
12468 #include "RISCVGenSearchableTables.inc"
12469 
12470 } // namespace RISCVVIntrinsicsTable
12471 
12472 } // namespace llvm
12473